背景
最近工作时碰上一件糟心事,本来是一个 vlookup 就能解决的事情,由于其中一个 csv 文件过大(近 700M),我的电脑只要一打开就报内存不足的错误。
于是我决定采用 pandas 中 read_csv 的 chunksize 参数来分批读取并匹配相关数据,详细代码如下:
# Chunked "vlookup": left-merge a large CSV onto a small Excel reference
# table without loading the whole CSV into memory.
#
# The CSV is streamed with read_csv(chunksize=...); each chunk is merged
# onto the Excel table, and rows not matched by earlier chunks are filled
# in by later ones via DataFrame.update(overwrite=False).
import pandas as pd
from tkinter import filedialog

# --- Load the (small) Excel reference table --------------------------------
# NOTE: read_excel() accepts neither `encoding` nor `low_memory` (the
# original passed both, which raises TypeError on pandas >= 1.0).
totalparm = filedialog.askopenfilename(title="请选择****表")
parm = pd.read_excel(totalparm, sheet_name='保护带小区')
# Normalize the join key to str so it compares equal to the CSV's key.
parm['公网CGI'] = parm['公网CGI'].astype(str)

# --- Stream the large CSV in 5000-row chunks -------------------------------
totaldata = filedialog.askopenfilename(title="请选择***表")
ttdata = pd.read_csv(
    totaldata,
    encoding='gbk',
    chunksize=5000,
    low_memory=True,
    usecols=['CGI', 'maxNumX2LinksIn', 'maxNumX2LinksOut',
             'actUeBasedAnrIntraFreqLte', 'actUeBasedAnrInterFreqLte',
             'eutraCarrierInfo', 'dlCarFrqEut'],
)

basedata = None  # accumulated result; None until the first chunk arrives
for chunk in ttdata:
    chunk['CGI'] = chunk['CGI'].astype(str)
    # Left merge: keep every row of `parm`; unmatched rows get NaN and may
    # be filled in by a later chunk.
    merged = pd.merge(parm, chunk, left_on='公网CGI', right_on='CGI', how='left')
    if basedata is None:
        basedata = merged
    else:
        # overwrite=False fills only the cells that are still NaN in
        # `basedata`. Alignment is by index, which is identical across
        # merges because `parm` is the left side every time.
        # (join='left' and errors='ignore' are update()'s defaults.)
        basedata.update(merged, overwrite=False)

# Write once, after all chunks are processed (the original rewrote the
# whole file inside the loop on every non-first chunk).
if basedata is not None:
    basedata.to_csv('result.csv')