# LeNet model module: defines LeNet and trains it on MNIST.

from keras import backend as K
from keras.layers import Activation, Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.optimizers import SGD
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer


class LeNet:
    """LeNet-5-style CNN: two CONV => RELU => POOL stages and an FC head."""

    @staticmethod
    def build(width, height, depth, classes, weightsPath=None):
        """Build and return the (uncompiled) LeNet model.

        Args:
            width: input image width in pixels.
            height: input image height in pixels.
            depth: number of input channels (1 for grayscale MNIST).
            classes: number of output classes (size of the softmax layer).
            weightsPath: optional path to pre-trained weights to load.

        Returns:
            A keras ``Sequential`` model.
        """
        # initialize the model
        model = Sequential()

        # Choose the input shape to match the backend's channel ordering.
        # The original hard-coded channels-first, which breaks on a
        # channels-last backend even though the calling script reshapes
        # the data according to K.image_data_format().
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        else:
            inputShape = (height, width, depth)

        # first set of CONV => RELU => POOL (valid padding)
        model.add(Conv2D(20, (5, 5), padding="valid", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # second set of CONV => RELU => POOL
        model.add(Conv2D(50, (5, 5), padding="valid"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # if a weights path is specified, load the pre-trained weights
        if weightsPath is not None:
            model.load_weights(weightsPath)

        return model
print("[INFO] accessing MNIST...")  # fixed typo: was "[INFP]"
# Load the MNIST dataset from OpenML. fetch_mldata was removed in
# scikit-learn 0.20; fetch_openml("mnist_784") is the supported replacement
# and yields the same 70000 x 784 design matrix.
dataset = datasets.fetch_openml("mnist_784", version=1, as_frame=False)
# BUG FIX: the original read `datasets.data` (the sklearn *module*),
# which raises AttributeError; the design matrix lives on the fetched bunch.
data = dataset.data

# reshape the design matrix to:
#   num_samples x depth x rows x columns   (channels-first), or
#   num_samples x rows x columns x depth   (channels-last)
# depending on the backend's image ordering
if K.image_data_format() == "channels_first":
    data = data.reshape(data.shape[0], 1, 28, 28)
else:
    data = data.reshape(data.shape[0], 28, 28, 1)

# scale pixel values to [0, 1] and perform a 75/25 train/test split
(trainX, testX, trainY, testY) = train_test_split(
    data / 255.0, dataset.target.astype("int"),
    test_size=0.25, random_state=42)

# one-hot encode the integer labels
le = LabelBinarizer()
trainY = le.fit_transform(trainY)
testY = le.transform(testY)

# initialize the optimizer and model
print("[INFO] compiling model...")
EPOCHS = 20  # single source of truth (was duplicated in fit() and the plots)
opt = SGD(lr=0.01)  # NOTE(review): newer Keras spells this `learning_rate`
model = LeNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              batch_size=128, epochs=EPOCHS, verbose=1)

# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=128)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1),
                            target_names=[str(x) for x in le.classes_]))

# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
epochRange = np.arange(0, EPOCHS)
plt.plot(epochRange, H.history["loss"], label="train_loss")
plt.plot(epochRange, H.history["val_loss"], label="val_loss")
# NOTE(review): Keras >= 2.3 records accuracy under "accuracy"/"val_accuracy";
# the "acc"/"val_acc" keys below match the older Keras API this file targets.
plt.plot(epochRange, H.history["acc"], label="train_acc")
plt.plot(epochRange, H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.legend()
plt.show()
# (Removed: CSDN web-page payment/comment-widget text accidentally pasted
#  into the source; it was not part of the program.)