I am trying to read a 100 GB CSV file. The file is read like this:

file = pd.read_csv("../code/csv/file.csv")

and I would like to see the progress while it loads, something like: =====> 30%

Is there any way to display a progress bar while read_csv is reading this (or any other) file?
Answer 0 (score: 2)
import os
import sys

import pandas as pd
from tqdm import tqdm

# Read a small sample to estimate the average size of a row in bytes.
temp = pd.read_csv(INPUT_FILENAME, nrows=20)
N = len(temp.to_csv(index=False))

# Estimate how many chunks of 10**5 rows the whole file contains.
t = int(os.path.getsize(INPUT_FILENAME) / N * 20 / 10**5) + 1

chunks = [temp[:0]]  # empty frame that preserves the column layout
with tqdm(total=t, file=sys.stdout) as pbar:
    for i, chunk in enumerate(pd.read_csv(INPUT_FILENAME, chunksize=10**5, low_memory=False)):
        chunks.append(chunk)
        pbar.set_description('Importing: %d' % (1 + i))
        pbar.update(1)

# Concatenate all chunks into a single DataFrame.
data = pd.concat(chunks, ignore_index=True)
del chunks
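An alternative, not taken from the original answer, is to drive the bar by bytes read instead of an estimated chunk count, by wrapping the open file with tqdm.wrapattr. This is only a sketch with a placeholder path, and the bar tracks bytes handed to the parser, so it can run slightly ahead of the rows actually parsed:

import os

import pandas as pd
from tqdm import tqdm

path = "large_file.csv"  # placeholder path

# Wrap the file's read() so every call advances the bar by the bytes consumed.
with tqdm.wrapattr(open(path, "rb"), "read",
                   total=os.path.getsize(path), desc="Reading") as f:
    data = pd.read_csv(f, low_memory=False)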
Answer 1 (score: 1)
Fancy output with the typer module. I tested this in a Jupyter Notebook on a massive delimited text file with 618k rows.
from pathlib import Path

import pandas as pd
import typer
from tqdm.auto import tqdm

txt = Path("<path-to-massive-delimited-txt-file>").resolve()

# count the number of rows quickly
length = sum(1 for row in open(txt, 'r'))

# define a chunksize
chunksize = 5000

# collect the chunks and concatenate them once at the end
chunks = []

# fancy logging with typer
typer.secho(f"Reading file: {txt}", fg="red", bold=True)
typer.secho(f"total rows: {length}", fg="green", bold=True)

# tqdm context
with tqdm(total=length, desc="chunks read: ") as bar:
    # read in chunks; low_memory=False lets pandas assign dtypes consistently,
    # which matters for a massive file
    for i, chunk in enumerate(pd.read_csv(txt, chunksize=chunksize, low_memory=False)):
        # print the chunk number
        print(i)
        # keep the chunk
        chunks.append(chunk)
        # update the tqdm progress bar
        bar.update(chunksize)
        # 6 chunks are enough to test
        if i == 5:
            break

# build the final dataframe and finish with a friendly message
df = pd.concat(chunks, ignore_index=True)
typer.secho("end of reading chunks...", fg=typer.colors.BRIGHT_RED)
typer.secho(f"Dataframe length: {len(df)}", fg="green", bold=True)
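Counting lines with a plain Python generator can itself be slow on a file this size. A possible speed-up, sketched here rather than taken from the original answer, is to count newlines while reading the file in binary blocks:

def count_lines(path, block_size=1 << 20):
    # Count newline bytes while reading the file in 1 MiB blocks.
    n = 0
    with open(path, "rb") as f:
        while True:
            block = f.read(block_size)
            if not block:
                break
            n += block.count(b"\n")
    return n

length = count_lines(txt) would then replace the generator-based count above.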
Answer 2 (score: 0)
Try this:
import sys
from time import sleep

import pandas as pd
from tqdm import tqdm

values = range(1)

# a single-step bar that just marks the import
with tqdm(total=len(values), file=sys.stdout) as pbar:
    for i in values:
        pbar.set_description('Importing: %d' % (1 + i))
        pbar.update(1)
        sleep(1)
        df = pd.read_csv("......\\<file name>.csv", sep=";", dtype='unicode')
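The bar above only reflects the single loop iteration, not the read itself. A sketch, not part of the original answer and with placeholder path, separator and chunk size, that ties the bar to the chunks actually read could look like this:

import sys

import pandas as pd
from tqdm import tqdm

chunks = []
# without a known total, tqdm simply counts the chunks as they arrive
with tqdm(file=sys.stdout, desc="Importing chunks") as pbar:
    for chunk in pd.read_csv("<file name>.csv", sep=";", dtype='unicode',
                             chunksize=10**5):
        chunks.append(chunk)
        pbar.update(1)

df = pd.concat(chunks, ignore_index=True)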