# encoding: utf-8
import sys
import os
import jieba
import importlib
# NOTE(review): reload(sys) is a Python 2 relic (used back then to call
# sys.setdefaultencoding); on Python 3 it has no useful effect — confirm
# it can be dropped.
importlib.reload(sys)
# NOTE(review): sklearn.datasets.base was removed in scikit-learn >= 0.24;
# modern releases expose Bunch as sklearn.utils.Bunch — verify the
# installed sklearn version before upgrading.
from sklearn.datasets.base import Bunch
import pickle
# Corpus container: category names, per-file class labels, file paths and
# segmented file contents. The "filenams" misspelling is part of the
# pickled data layout — downstream readers presumably use that key, so
# it must not be renamed here.
bunch = Bunch(target_name=[],label=[],filenams=[],contents=[])
def savefile(savepath, content):
    """Write *content* to *savepath* as gb2312 text.

    Characters that cannot be encoded in gb2312 are silently dropped
    (errors='ignore'), matching the corpus' original encoding policy.
    """
    # `with` guarantees the handle is closed even if write() raises,
    # unlike the manual open/close pattern.
    with open(savepath, "w", encoding='gb2312', errors='ignore') as fp:
        fp.write(content)
def readfile(path):
    """Read and return the full text of *path*, decoded as gb2312.

    Undecodable bytes are silently skipped (errors='ignore'), so files in
    a slightly different encoding still load without raising.
    """
    # Context manager closes the file even if read() raises.
    with open(path, "r", encoding='gb2312', errors='ignore') as fp:
        return fp.read()
# Output path for the pickled Bunch and root of the segmented corpus.
wordbag_path = "F://研究生项目//Bunch分词后的文本语料库//train_set.dat"
seg_path = "F://研究生项目//Bunch分词后的文本语料库/"

# Keep only sub-directories as categories: wordbag_path lives inside
# seg_path, so a previous run leaves train_set.dat in this listing and
# os.listdir() on that plain file would crash.
catelist = [d for d in os.listdir(seg_path)
            if os.path.isdir(os.path.join(seg_path, d))]
bunch.target_name.extend(catelist)  # record the category names in the Bunch
for mydir in catelist:
    class_path = seg_path + mydir + "/"
    file_list = os.listdir(class_path)
    for file_path in file_list:
        # class_path already ends with "/" — the original added a second
        # separator, producing "dir//file" paths in the pickle.
        fullname = class_path + file_path
        bunch.label.append(mydir)        # class label of the current file
        bunch.filenams.append(fullname)  # full path of the current file
        bunch.contents.append(readfile(fullname).strip())  # segmented text
# Persist the Bunch object; "wb" suffices for a fresh write and `with`
# guarantees the handle is flushed and closed.
with open(wordbag_path, "wb") as file_obj:
    pickle.dump(bunch, file_obj)
print("构建文本结束了")