# 本文中记录的是如何利用pandas来读取大文件的技巧 (tips for reading large files with pandas):
import pandas as pd


def read_csv_in_chunks(filepath="taobao.csv", chunk_size=10000000):
    """Read a large CSV file chunk-by-chunk and return one DataFrame.

    Reading in chunks keeps memory bounded while parsing: only one raw
    chunk is materialized at a time before being collected.

    Parameters
    ----------
    filepath : str or path-like, optional
        Path to the CSV file. The file has no header row.
    chunk_size : int, optional
        Number of rows parsed per chunk (default 10,000,000).

    Returns
    -------
    pandas.DataFrame
        All rows, with columns user_id, item_id, cat_id, type, time.
    """
    chunks = []
    # `with` guarantees the handle is closed even if parsing raises
    # (the original opened the file and never closed it).
    # errors="ignore" skips undecodable bytes, matching the original open().
    with open(filepath, errors="ignore") as handle:
        # chunksize= makes read_csv return an iterator of DataFrames;
        # plain iteration replaces the manual get_chunk/StopIteration loop.
        reader = pd.read_csv(
            handle,
            header=None,
            names=["user_id", "item_id", "cat_id", "type", "time"],
            chunksize=chunk_size,
        )
        for chunk in reader:
            chunks.append(chunk)
    # If the dataset is too large to analyze in full, sample without
    # replacement (no duplicate rows), e.g.:
    #   pd.concat(chunks, ignore_index=True).sample(frac=0.05, replace=False)
    return pd.concat(chunks, ignore_index=True)


if __name__ == "__main__":
    # Preserve the original script behavior when run directly.
    df = read_csv_in_chunks("taobao.csv")