# 此为改编自cifar10的三分类问题 — a 3-class classification problem adapted from the CIFAR-10 example.
from __future__ import print_function #此为在老版本的python中兼顾新特性的一种方法
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
from keras import backend as K
from keras import regularizers
from keras.applications.imagenet_utils import preprocess_input
import scipy.misc
import numpy as np
import matplotlib.pyplot as plt
import h5py
#from keras.datasets import cifar10
#from keras.layers.core import Lambda
#from matplotlib.pyplot import imshow
class teeth3vgg:
def __init__(self,train=False):
self.num_classes = 3
self.weight_decay = 0.0005 #权值衰减,目的是防止过拟合
self.x_shape = [32,32,3]
self.model = self.build_model()
if train:
self.model = self.train(self.model)
else:
#self.model.load_weights('weight.h5')
# 加载模型数据和weights
self.model = model_from_json(open('my_model_architecture.json').read())
self.model.load_weights('my_model_weights.h5')
def build_model(self):
# Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.
model = Sequential()
weight_decay = self.weight_decay
model.add(Conv2D(64, (3, 3), padding='same',input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
#kernel_regularizer表示施加在权重上的正则项
model.add(Activation('relu'))
model.add(BatchNormalization())
#该层在每个batch(批次)上将前一层的激活值重新规范化,即使得其输出数据的均值接近0,其标准差接近1
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))