【keras】Handwritten Digit Recognition

prepare.py

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding

# Load the data
train = pd.read_csv("../input/train.csv")
print("原始数据:",train.shape)
train=train.drop(index=range(37000))
print("提取数据:",train.shape)

Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1)# Drop 'label' column

X_train = (X_train/255.).values.reshape(-1,28,28,1)# Normalize & reshape to 3 dimensions (h = 28, w = 28, channel = 1)
Y_train = to_categorical(Y_train, num_classes = 10)# Encode labels to one hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])

# Split the data into a training set and a validation set for fitting
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.2, random_state=2)
for i in [X_train, X_val, Y_train, Y_val]:
    print(i.shape)
# test_size is the fraction of the data held out for validation; random_state is the random seed
# X_train: training inputs, X_val: validation inputs, Y_train: training labels, Y_val: validation labels
np.savez('data',X_train=X_train, Y_train=Y_train,X_val=X_val,Y_val=Y_val)

train.py

import numpy as np

from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau

data=np.load('data.npz')
X_train=data['X_train']
Y_train=data['Y_train']
X_val=data['X_val']
Y_val=data['Y_val']

# 1. Definition
# 1.1 Define the model (set up the CNN)
# my CNN architecture is In -> [[Conv2D->relu]*2 -> MaxPool2D -> Dropout]*2 -> Flatten -> Dense -> Dropout -> Out
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
                 activation ='relu', input_shape = (28,28,1)))#out 28 28 32
                 # kernel_size: size of the convolution window
                 # padding = 'Same': pad the input so the output has the same spatial size as the input; zeros are added as evenly as possible on both sides, and if an odd number of columns must be added, the extra one goes on the right
                 # filters: dimensionality of the output space, i.e. the number of filters
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
                 activation ='relu'))#out 28 28 32
model.add(MaxPool2D(pool_size=(2,2)))# out 14 14 32
model.add(Dropout(0.25))

model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
                 activation ='relu'))# out 14 14 64
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
                 activation ='relu'))# out 14 14 64
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))# out 7 7 64
model.add(Dropout(0.25))# adding this block one more time actually lowered the accuracy

model.add(Flatten())
model.add(Dense(256, activation = "relu"))#out 256
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
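To double-check the per-layer output shapes noted in the comments above, printing the model summary is a quick sanity check (this line is my addition, not part of the original script):

model.summary()
# The output shapes should read roughly:
# (None, 28, 28, 32) -> (None, 28, 28, 32) -> (None, 14, 14, 32) ->
# (None, 14, 14, 64) -> (None, 14, 14, 64) -> (None, 7, 7, 64) ->
# (None, 3136) -> (None, 256) -> (None, 10)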

# 1.2 Define the optimizer
# The optimizer is one of the two core arguments needed to compile a model (the other is the loss function)
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# RMSProp adds a decay coefficient that controls how much gradient history is kept; rho and decay are both decay factors
# lr is the learning rate, rho is the β from Andrew Ng's lectures, epsilon prevents division by zero, and decay (per the official docs) is how much the learning rate is reduced on each update
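As a rough illustration of those parameters (my own numpy sketch, not Keras internals): one RMSProp step keeps a running average of squared gradients weighted by rho, and epsilon keeps the denominator away from zero.

import numpy as np

lr, rho, epsilon = 0.001, 0.9, 1e-08
grad = np.array([0.5, -0.2])               # gradient of the loss w.r.t. some weights
cache = np.zeros_like(grad)                # running average of squared gradients

cache = rho * cache + (1 - rho) * grad ** 2               # accumulate history, weighted by rho
weight_update = -lr * grad / (np.sqrt(cache) + epsilon)   # epsilon prevents division by zero
print(weight_update)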

# Compile the model
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])

# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
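With these settings, whenever val_acc fails to improve for 3 consecutive epochs the learning rate is halved (factor=0.5), so it can step down 0.001 -> 0.0005 -> 0.00025 -> ... until it reaches min_lr = 0.00001.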

# 2. Training
# 2.1 Data augmentation
datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.1, # Randomly zoom image
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images
        vertical_flip=False)  # randomly flip images
datagen.fit(X_train)
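As a quick visual check of the augmentation ranges above, the following small sketch (my addition, assuming matplotlib is available) draws one augmented batch from datagen and displays the first nine digits:

import matplotlib.pyplot as plt

aug_batch, _ = next(datagen.flow(X_train, Y_train, batch_size=9))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(aug_batch[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
plt.show()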

# 2.2 Fit the model
batch_size = 43  # other values tried: 86, 172, 344
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),# batch_size: number of samples drawn per training step
                              epochs = 1 ,# Turn epochs to 30 to get 0.9967 accuracy
                              validation_data = (X_val,Y_val),
                              verbose = 1,
                              steps_per_epoch=X_train.shape[0] // batch_size,
                              callbacks=[learning_rate_reduction])
# epochs: integer, number of passes over the data
# validation_data: takes one of three forms: a generator yielding validation data, (inputs, targets), or (inputs, targets, sample_weights)
# verbose: logging mode; 0 = silent, 1 = progress bar, 2 = one line per epoch
# steps_per_epoch: number of generator batches that make up one epoch
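For example, assuming the standard Kaggle digit-recognizer train.csv with 42,000 rows: after dropping the first 37,000 rows, 5,000 remain; the 80/20 split leaves 4,000 training samples, so steps_per_epoch = 4000 // 43 = 93 generator batches per epoch.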

for i in history.history:
    print(i,history.history[i])
#loss,acc,val_loss,val_acc
# Analyzing the training curves: acc / loss / val_acc / val_loss
# If the validation curves oscillate, a likely cause is that the training batch_size is too small
# For classification problems, the metric to watch is usually the validation accuracy
# loss is the value computed by the loss function we configured;
# accuracy is the model's evaluation result on the dataset against the given labels
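A minimal plotting sketch for these four curves (my addition; it assumes matplotlib, and note that the history keys are 'acc'/'val_acc' in older Keras versions but 'accuracy'/'val_accuracy' in newer ones):

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(history.history['loss'], label='loss')
ax1.plot(history.history['val_loss'], label='val_loss')
ax1.set_title('loss')
ax1.legend()
ax2.plot(history.history['acc'], label='acc')
ax2.plot(history.history['val_acc'], label='val_acc')
ax2.set_title('accuracy')
ax2.legend()
plt.show()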

model.save( filepath="modelResult.h5", overwrite=True, include_optimizer=True )
# filepath: save path    overwrite: whether to overwrite an existing file    include_optimizer: whether to save the optimizer state

predict.py

import pandas as pd
import numpy as np
import keras
# Load the data
test = pd.read_csv("../input/test.csv")
test = test.loc[0:10]  # keep only the first 11 rows for a quick check (loc is inclusive)

# Predict
model = keras.models.load_model( "modelResult.h5" )
test2 = (test / 255.).values.reshape(-1,28,28,1)# normalize & reshape
pred = model.predict(test2)
pred_classes = np.argmax(pred,axis = 1)
print(pred_classes)

# Verify the predictions visually
def showPic(data):
    import torch
    from torchvision.utils import make_grid
    import cv2
    n_pic = data.shape[0]
    n_pixels = data.shape[1]
    width = int(n_pixels**0.5)
    pwh = data.reshape((n_pic, width, width))  # reshape (n, 784) -> (n, 28, 28)
    pcwh = torch.Tensor(pwh).unsqueeze(1)  # add a channel dimension of size 1
    cwh = make_grid(pcwh)  # tile the images into one grid and expand to 3 channels
    whc = cwh.numpy().transpose((1, 2, 0))  # (height, width, channels), e.g. (32, 242, 3)
    whc = whc.astype(np.uint8)  # cv2.imshow expects uint8 for 0-255 pixel values

    cv2.imshow('img0', whc)
    cv2.waitKey(0)
showPic(test.values)