For a large file, you can use pandas.read_csv with the chunksize argument, which lets you read the dataset chunk by chunk:
import pandas as pd

INPUT_CSV = "input.csv"
OUTPUT_CSV = "output.csv"
CHUNKSIZE = 1_000  # Maximum number of rows kept in memory at once

header = True
mode = "w"
for chunk_df in pd.read_csv(INPUT_CSV, chunksize=CHUNKSIZE):
    chunk_df["Berry"] = chunk_df["Name"]
    # Apply any other transformation to the chunk
    # ...
    chunk_df.to_csv(OUTPUT_CSV, header=header, mode=mode)
    header = False  # Do not write the header again for the remaining chunks
    mode = "a"      # 'a' stands for append mode: all the other chunks are appended
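As a variant, if you prefer not to track the header and mode flags by hand, you can number the chunks with enumerate. A minimal sketch, assuming pandas >= 1.2 (where the chunked reader can be used as a context manager that closes the file for you) and passing index=False so the row index is not written as an extra column:

import pandas as pd

INPUT_CSV = "input.csv"
OUTPUT_CSV = "output.csv"
CHUNKSIZE = 1_000

with pd.read_csv(INPUT_CSV, chunksize=CHUNKSIZE) as reader:
    for i, chunk_df in enumerate(reader):
        chunk_df["Berry"] = chunk_df["Name"]
        # Write the header only for the first chunk, then append
        chunk_df.to_csv(
            OUTPUT_CSV,
            header=(i == 0),
            mode="w" if i == 0 else "a",
            index=False,
        )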
If you want to update the file in place, you can write to a temporary file and then replace the original with it at the end:
import os

import pandas as pd

INPUT_CSV = "input.csv"
TMP_CSV = "tmp.csv"
CHUNKSIZE = 1_000  # Maximum number of rows kept in memory at once

header = True
mode = "w"
for chunk_df in pd.read_csv(INPUT_CSV, chunksize=CHUNKSIZE):
    chunk_df["Berry"] = chunk_df["Name"]
    # Apply any other transformation to the chunk
    # ...
    chunk_df.to_csv(TMP_CSV, header=header, mode=mode)
    header = False  # Do not write the header again for the remaining chunks
    mode = "a"      # 'a' stands for append mode: all the other chunks are appended

os.replace(TMP_CSV, INPUT_CSV)  # Overwrite the original file with the temporary one
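Note that os.replace only swaps the files once the whole temporary file has been written, and it silently overwrites INPUT_CSV. If you would rather use a unique temporary name than a hard-coded "tmp.csv", you could create it with the tempfile module. A sketch under the assumption that the temporary file is placed in the same directory as the input, so the final replace stays on one filesystem:

import os
import tempfile

import pandas as pd

INPUT_CSV = "input.csv"
CHUNKSIZE = 1_000

# Create the temporary file next to the input so os.replace stays on one filesystem
tmp_fd, tmp_csv = tempfile.mkstemp(
    suffix=".csv", dir=os.path.dirname(os.path.abspath(INPUT_CSV))
)
os.close(tmp_fd)  # Only the path is needed; pandas reopens the file itself

header = True
mode = "w"
for chunk_df in pd.read_csv(INPUT_CSV, chunksize=CHUNKSIZE):
    chunk_df["Berry"] = chunk_df["Name"]
    chunk_df.to_csv(tmp_csv, header=header, mode=mode)
    header = False
    mode = "a"

os.replace(tmp_csv, INPUT_CSV)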