1.保存成npy的自定义数据集
import os
import numpy as np
import random
import pickle
def read_file(file_path):
    """Read a whitespace-delimited text file into a list of token lists.

    Each line has tabs normalized to spaces, is stripped of surrounding
    whitespace, and is split on single spaces (consecutive spaces therefore
    yield empty-string tokens, matching the original behavior).

    Args:
        file_path: Path of the text file to parse.

    Returns:
        list[list[str]]: One token list per line of the file.
    """
    content = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file_path, 'r') as file:
        for line in file:
            line = line.replace("\t", " ")
            line_split = line.strip("\r\n\t").strip().split(" ")
            content.append(line_split)
    return content
def get_file_list(folder):
    """Return the full paths of all entries directly inside *folder*.

    Args:
        folder: Directory to list (non-recursive).

    Returns:
        list[str]: ``folder``-joined paths, in ``os.listdir`` order.
    """
    return [os.path.join(folder, entry) for entry in os.listdir(folder)]
if __name__ == '__main__':
    # Parse every file in the folder and save the result as one .npy array.
    all_file = get_file_list("文件夹路径")
    data = [read_file(item) for item in all_file]
    # NOTE(review): if the files have different line/token counts this
    # produces a ragged array (errors on NumPy >= 1.24 without
    # dtype=object) — confirm all files share the same shape.
    data = np.array(data)
    np.save('xxxxx.npy', data)
    print(data)  # fixed: original printed the undefined name `x`
2.用于二分类的自定义数据集
import os
import numpy as np
import random
import pickle
def read_file(file_path):
    """Read a whitespace-delimited text file into a list of token lists.

    Tabs are normalized to spaces, each line is stripped, and tokens are
    split on single spaces (runs of spaces yield empty-string tokens,
    matching the original behavior).

    Args:
        file_path: Path of the text file to parse.

    Returns:
        list[list[str]]: One token list per line of the file.
    """
    content = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file_path, 'r') as file:
        for line in file:
            line = line.replace("\t", " ")
            line_split = line.strip("\r\n\t").strip().split(" ")
            content.append(line_split)
    return content
def get_file_list(folder):
    """Return the full paths of all entries directly inside *folder*.

    Args:
        folder: Directory to list (non-recursive).

    Returns:
        list[str]: ``folder``-joined paths, in ``os.listdir`` order.
    """
    return [os.path.join(folder, name) for name in os.listdir(folder)]
def savepkl(data, path):
    """Serialize *data* to *path* with pickle.

    Args:
        data: Any picklable object.
        path: Destination file path (opened in binary write mode).
    """
    # `with` closes the file even if pickle.dump raises
    # (the original's explicit close was skipped on error).
    with open(path, 'wb') as file:
        pickle.dump(data, file)
if __name__ == '__main__':
    # Each entry maps a binary class label to the folder holding its samples.
    # fixed: the original dict literals were unterminated (missing `},`),
    # which made the whole list a SyntaxError.
    FOLDERS = [
        {"class": 1, "folder": "分类为1的文件夹路径"},
        {"class": 0, "folder": "分类为0的文件夹路径"},
    ]
    # Pair every file path with its folder's class label, then shuffle.
    all_file = [(item, folder["class"]) for folder in FOLDERS for item in get_file_list(folder["folder"])]
    random.shuffle(all_file)
    x = [read_file(item[0]) for item in all_file]
    y = [item[1] for item in all_file]
    x_np = np.array(x)
    y_np = np.array(y)
    # Save data and labels as pickle files.
    # NOTE(review): both placeholders are "XXX.pkl", so the label pickle
    # overwrites the data pickle — give them distinct real paths.
    x_pklfile = "XXX.pkl"
    y_pklfile = "XXX.pkl"
    savepkl(x_np, x_pklfile)
    savepkl(y_np, y_pklfile)
3.利用保存的模型输出评价指标
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score, accuracy_score
import pickle
import numpy as np
from keras.models import load_model
from keras.utils import np_utils
if __name__ == '__main__':
    # Paths of the pickled data/labels and the trained Keras model.
    pklfilex = "xxx.pkl"
    pklfiley = "xxx.pkl"
    model_name = 'xxx.h5'  # fixed: the original literal was missing its closing quote
    # fixed: FOLD was undefined (NameError). Presumably the number of
    # cross-validation folds; 1/FOLD of the samples are held out for
    # testing — adjust to match how the model was trained.
    FOLD = 5
    # Load data, labels and model; `with` closes the pickle files.
    with open(pklfilex, 'rb') as fx:
        np_all_samples_x = pickle.load(fx)
    with open(pklfiley, 'rb') as fy:
        np_all_samples_y = pickle.load(fy)
    np_all_samples_x = np.array(np_all_samples_x, dtype=int)
    file_num = len(np_all_samples_y)
    sub_file_num = int(file_num / FOLD)
    x_test = np_all_samples_x[0: sub_file_num]  # The samples for testing
    y_test = np_all_samples_y[0: sub_file_num]  # The label of the samples for testing
    # Model prediction: argmax over class probabilities -> predicted label.
    model = load_model(model_name)
    y_pred = np.argmax(model.predict(x_test), axis=1)
    # Report evaluation metrics (binary averaging made explicit for
    # consistency; 'binary' is sklearn's default for precision/recall/f1).
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='binary')
    recall = recall_score(y_test, y_pred, average='binary')
    f1score = f1_score(y_test, y_pred, average='binary')
    MiF1 = f1_score(y_test, y_pred, average='micro')
    MaF1 = f1_score(y_test, y_pred, average='macro')
    print('accuracy:', accuracy, 'precision:', precision)
    print('recall:', recall, 'f1score:', f1score)
    print('Macro-F1: {}'.format(MaF1), 'Micro-F1: {}'.format(MiF1))