- batch_size 是一次喂进模型的数据量;例如总数据量为 10000、batch_size=500 时,需要迭代 20 次才能完成一个 epoch。
# Fit the model on the pre-processed tensors.
# validation_split=0 keeps every sample in the training set; with
# batch_size=500, a 10000-sample dataset takes 20 steps per epoch.
fit_kwargs = dict(
    x=train_Xs,
    y=train_Ys,
    validation_split=0,
    epochs=30,
    batch_size=500,
    verbose=2,
)
train_history = model.fit(**fit_kwargs)
- 预处理:把图像逐张读入 list,再转换成 array,并归一化到 [0, 1]。
def pre_deal(file_names, satrt_num, data_lenth,
             input_size_1=input_size_1, input_size_2=input_size_2):
    """Load, resize and normalise a slice of image/contour pairs.

    Reads ``data_lenth`` file names starting at ``satrt_num`` from
    ``file_names``; each name is looked up under both ``dataset/img/``
    (input image) and ``dataset/contours/`` (target image).

    Args:
        file_names: sequence of file names shared by the two directories.
        satrt_num: start index of the slice (misspelled name kept for
            backward compatibility with existing callers).
        data_lenth: number of files to load (same caveat as above).
        input_size_1, input_size_2: resize target passed to cv2.resize as
            ``(input_size_1, input_size_2)`` i.e. (width, height); defaults
            come from the module-level globals of the same names.

    Returns:
        (IN_imgs, OUT_imgs): two float32 numpy arrays of shape
        (data_lenth, input_size_2, input_size_1, 3) with values in [0, 1].

    Raises:
        IOError: if an image file is missing or unreadable.
    """
    files_ = file_names[satrt_num:satrt_num + data_lenth]
    x_imgs = []
    y_imgs = []
    for name in files_:
        x_img = cv2.imread('dataset/img/' + name)
        y_img = cv2.imread('dataset/contours/' + name)
        # cv2.imread returns None on failure; fail loudly here instead of
        # crashing later on .astype with an opaque AttributeError.
        if x_img is None or y_img is None:
            raise IOError('could not read image pair for %r' % name)
        # Note: cv2.resize takes (width, height) and returns (height, width, 3).
        x_img = cv2.resize(x_img.astype('float'), (input_size_1, input_size_2))
        y_img = cv2.resize(y_img.astype('float'), (input_size_1, input_size_2))
        x_imgs.append(x_img / 255)   # scale pixel values to [0, 1]
        y_imgs.append(y_img / 255)
    # Guard the debug print: indexing x_imgs[2] crashed for data_lenth < 3.
    if x_imgs:
        print(x_imgs[0].shape, len(x_imgs))
    in_imgs = np.array(x_imgs)   # list of images -> (N, H, W, 3) array
    out_imgs = np.array(y_imgs)
    print(in_imgs.shape)
    # The original code reshaped to (N, input_size_1, input_size_2, 3), which
    # silently scrambles pixels whenever input_size_1 != input_size_2 because
    # reshape does not transpose axes. The stacked array already has the
    # correct 4-D layout, so only the dtype conversion is needed.
    IN_imgs = in_imgs.astype('float32')
    OUT_imgs = out_imgs.astype('float32')
    return IN_imgs, OUT_imgs
- 由于内存限制,一次装不下全部数据,因此分多批载入、多次训练(实测这样的效果并不好)。
# Train on the dataset in fixed-size chunks because the whole set does
# not fit in memory at once; each chunk is loaded, trained for 30 epochs,
# then replaced by the next one.
chunk_size = 10000
for num in range(1):
    print("group :", num)
    train_Xs, train_Ys = pre_deal(train_files, num * chunk_size, chunk_size)
    train_history = model.fit(
        x=train_Xs,
        y=train_Ys,
        validation_split=0,
        epochs=30,
        batch_size=500,
        verbose=2,
    )
# Persist the trained weights once training is finished.
save_model(model, '7_12unet4.h5')