1. 环境:Keras
2. 准备数据集,分为 train 和 test 两部分;train 里有两个文件夹——image 和 label,分别存放训练图像和对应的标签
3. 编写代码
① 网络模型:unet、segnet、refinenet、reseg
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Activation, BatchNormalization, add
from keras.optimizers import Adam
def _double_conv(tensor, filters):
    """Two stacked 3x3 ReLU convolutions — the basic U-Net building block."""
    y = Conv2D(filters, 3, activation='relu', padding='same',
               kernel_initializer='he_normal')(tensor)
    return Conv2D(filters, 3, activation='relu', padding='same',
                  kernel_initializer='he_normal')(y)


def unet(pretrained_weights=None, input_size=(512, 512, 1)):
    """Build and compile the classic U-Net for binary segmentation.

    Args:
        pretrained_weights: optional path to an HDF5 weights file to load.
        input_size: (height, width, channels) of the input images.

    Returns:
        A compiled Keras Model producing a single-channel sigmoid map.
    """
    inputs = Input(input_size)

    # Contracting path: double conv then 2x2 max-pool at each level.
    conv1 = _double_conv(inputs, 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(pool1, 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _double_conv(pool3, 512)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    conv5 = _double_conv(pool4, 1024)
    drop5 = Dropout(0.5)(conv5)

    # Expansive path: upsample + 2x2 conv, concatenate the skip
    # connection from the matching encoder level, then double conv.
    up6 = Conv2D(512, 2, activation='relu', padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = _double_conv(merge6, 512)

    up7 = Conv2D(256, 2, activation='relu', padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = _double_conv(merge7, 256)

    up8 = Conv2D(128, 2, activation='relu', padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = _double_conv(merge8, 128)

    up9 = Conv2D(64, 2, activation='relu', padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = _double_conv(merge9, 64)
    conv9 = Conv2D(2, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    # BUG FIX: `input=`/`output=` are the removed Keras 1 keyword names;
    # Keras 2 requires the plural forms (this also matches the other
    # model builders in this file).
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy',
                  metrics=['accuracy'])

    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
def segnet(input_size=(512, 512, 1)):
    """Build and compile a small SegNet-style encoder/decoder.

    Two pooling stages compress the input; two upsampling stages restore
    the original resolution. Returns a compiled Keras Model whose output
    is a single-channel sigmoid map.
    """
    def conv_bn(tensor, filters, act='relu'):
        # 3x3 conv -> batch norm -> activation, the network's repeated unit.
        y = Conv2D(filters, 3, padding='same',
                   kernel_initializer='he_normal')(tensor)
        y = BatchNormalization()(y)
        return Activation(act)(y)

    net_in = Input(input_size)

    # Encoder: two conv-BN-ReLU stages, each followed by 2x2 pooling.
    enc = conv_bn(net_in, 64)
    enc = MaxPooling2D(pool_size=(2, 2))(enc)
    enc = conv_bn(enc, 128)
    enc = MaxPooling2D(pool_size=(2, 2))(enc)

    # Decoder: mirror the encoder with 2x2 upsampling before each conv.
    dec = UpSampling2D(size=(2, 2))(enc)
    dec = conv_bn(dec, 64)
    dec = UpSampling2D(size=(2, 2))(dec)
    dec = conv_bn(dec, 1, act='sigmoid')

    net = Model(inputs=net_in, outputs=dec)
    net.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
def refinenet(input_size=(512, 512, 1)):
    """Build and compile a small skip-connected encoder/decoder.

    Args:
        input_size: (height, width, channels) of the input images.

    Returns:
        A compiled Keras Model producing a single-channel sigmoid map.
    """
    inputs = Input(input_size)

    # Encoder: two conv-BN stages with 2x2 pooling.
    conv1 = Conv2D(64, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = BatchNormalization()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(pool1)
    conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    # Decoder: upsample and fuse with the matching encoder feature map.
    up1 = UpSampling2D(size=(2, 2))(pool2)
    merge1 = concatenate([conv2, up1], axis=3)
    conv3 = Conv2D(64, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(merge1)
    conv3 = BatchNormalization()(conv3)
    up2 = UpSampling2D(size=(2, 2))(conv3)
    merge2 = concatenate([conv1, up2], axis=3)

    # BUG FIX: the original applied BatchNormalization AFTER the sigmoid
    # output layer, pushing predictions outside [0, 1] and breaking
    # binary_crossentropy. The sigmoid conv is now the final layer.
    outputs = Conv2D(1, 3, activation='sigmoid', padding='same',
                     kernel_initializer='he_normal')(merge2)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def reseg(input_size=(512, 512, 1)):
    """Build and compile a U-Net-like network with single conv-BN stages.

    Four pooling levels down to a 1024-filter bottleneck, then four
    upsampling levels, each fused with the matching encoder feature map.
    Returns a compiled Keras Model with a single-channel sigmoid output.
    """
    def conv_bn(tensor, filters):
        # 3x3 ReLU conv followed by batch normalization.
        y = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(tensor)
        return BatchNormalization()(y)

    net_in = Input(input_size)

    # Encoder: record each pre-pool feature map as a skip connection.
    skips = []
    x = net_in
    for filters in (64, 128, 256, 512):
        x = conv_bn(x, filters)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

    # Bottleneck.
    x = conv_bn(x, 1024)

    # Decoder: upsample, concatenate the skip from the same depth,
    # then conv-BN; skips are consumed deepest-first.
    for filters, skip in zip((512, 256, 128, 64), reversed(skips)):
        x = UpSampling2D(size=(2, 2))(x)
        x = concatenate([skip, x], axis=3)
        x = conv_bn(x, filters)

    out = Conv2D(1, 1, activation='sigmoid')(x)

    net = Model(inputs=net_in, outputs=out)
    net.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
② 读入数据,进行训练
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate
from keras.models import Model
from keras.preprocessing.image import img_to_array, load_img
from keras.callbacks import ModelCheckpoint
import os
from glob import glob
import numpy as np
from sklearn.model_selection import train_test_split
from model import *
import matplotlib.pyplot as plt
def load_data(image_folder, mask_folder):
    """Load paired grayscale images and masks, scaled to [0, 1].

    Pairing relies on both folders sorting into the same order, so image
    and mask files should share a naming scheme.

    Args:
        image_folder: directory containing the input images.
        mask_folder: directory containing the corresponding label masks.

    Returns:
        (X, Y): float arrays of shape (n, H, W, 1) with values in [0, 1].

    Raises:
        ValueError: if the two folders contain different numbers of files
            (the original `zip` silently truncated, mis-pairing the data).
    """
    image_paths = sorted(glob(os.path.join(image_folder, "*")))
    mask_paths = sorted(glob(os.path.join(mask_folder, "*")))
    if len(image_paths) != len(mask_paths):
        raise ValueError(
            "image/mask count mismatch: %d images vs %d masks"
            % (len(image_paths), len(mask_paths)))
    X = np.array([img_to_array(load_img(p, color_mode="grayscale")) / 255.
                  for p in image_paths])
    Y = np.array([img_to_array(load_img(p, color_mode="grayscale")) / 255.
                  for p in mask_paths])
    return X, Y
# Paths to the training images and their pixel-level label masks.
image_folder = 'E:/fenge/unet-master/data_fenge/train/image'
mask_folder = 'E:/fenge/unet-master/data_fenge/train/label'
X, Y = load_data(image_folder, mask_folder)
# Hold out 10% of the pairs for validation (fixed seed for reproducibility).
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.1,
                                                  random_state=42)

# Train each architecture in turn. The four duplicated if-blocks of the
# original are consolidated into one data-driven loop; the checkpoint
# keeps the weights from the epoch with the lowest training loss.
MODELS = [
    ('unet.hdf5', unet),
    ('segnet.hdf5', segnet),
    ('refinenet.hdf5', refinenet),
    ('resegnet.hdf5', reseg),
]
for weights_path, build_model in MODELS:
    model = build_model()
    model_checkpoint = ModelCheckpoint(weights_path, monitor='loss',
                                       verbose=1, save_best_only=True)
    model.fit(X_train, Y_train, batch_size=2, epochs=30,
              validation_data=(X_val, Y_val), callbacks=[model_checkpoint])
③ 测试(先建立result文件夹)
from model import *
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate
from keras.models import Model
from keras.preprocessing.image import img_to_array, load_img
from keras.callbacks import ModelCheckpoint
import os
from glob import glob
import numpy as np
from sklearn.model_selection import train_test_split
from model import *
import matplotlib.pyplot as plt
# Select the model to evaluate: uncomment exactly one builder/weights/dir
# triple below.
# U-Net
model = unet()
model.load_weights("unet.hdf5")
out_dir = 'result/unet'
# SegNet
# model = segnet()
# model.load_weights("segnet.hdf5")
# out_dir = 'result/segnet'
# RefineNet
# model = refinenet()
# model.load_weights("refinenet.hdf5")
# out_dir = 'result/refinenet'
# ReSeg
# model = reseg()
# model.load_weights("resegnet.hdf5")
# out_dir = 'result/resegnet'

# Create the output directory up front instead of requiring the user to
# make it by hand (the original crashed on imsave if it was missing).
os.makedirs(out_dir, exist_ok=True)

test_images = sorted(glob('E:/fenge/unet-master/data_fenge/test/*'))
for i, test_img in enumerate(test_images):
    img = img_to_array(load_img(test_img, color_mode="grayscale")) / 255.
    img = np.expand_dims(img, axis=0)  # add batch dimension: (1, H, W, 1)
    pred = model.predict(img)
    # Scale the sigmoid probabilities back to 8-bit gray levels.
    pred_image = (pred[0, :, :, 0] * 255.).astype(np.uint8)
    plt.imsave(os.path.join(out_dir, 'pre_%d.png' % i), pred_image,
               cmap='gray')