一文搞定Keras快速入门

1 简介

Keras 是谷歌公司基于 TensorFlow 开发的深度学习（Deep Learning）开源框架，接口简便，适合初学者。本文将通过 MNIST 手写数字识别案例剖析 Keras 的常用接口。

2 NN

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the MNIST data set (downloads on first use).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# Flatten and normalize: (60000, 28, 28) -> (60000, 784), pixels scaled to [0, 1].
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Convert integer labels to one-hot vectors.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Model: 784-10, a single softmax layer.
model = Sequential([Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')])

# SGD optimizer, learning rate 0.2.
sgd = SGD(lr=0.2)
# adam = Adam(lr=0.001)  # alternative optimizer

# Compile with optimizer, loss and metrics.
# NOTE: 'mse' is used here for demonstration; 'categorical_crossentropy'
# is the usual loss for one-hot classification.
model.compile(
    optimizer=sgd,
    loss='mse',
    metrics=['accuracy'],
)

# Train the model.
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the test set.  (In the original text this line was fused with the
# print statement, which is a SyntaxError.)
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

x_shape: (60000, 28, 28)
y_shape: (60000,)
test loss 0.0130496878728
accuracy 0.9189

2.1 Dropout
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD

# Load the MNIST data set.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# Flatten and normalize: (60000, 28, 28) -> (60000, 784), pixels scaled to [0, 1].
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Convert integer labels to one-hot vectors.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Model: 784-200-100-10 with Dropout after each hidden layer.
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer='one', activation='tanh'),
    Dropout(0.4),  # randomly drop 40% of units during training
    Dense(units=100, bias_initializer='one', activation='tanh'),
    Dropout(0.4),
    Dense(units=10, bias_initializer='one', activation='softmax')
])

# SGD optimizer, learning rate 0.2.
sgd = SGD(lr=0.2)

# Compile with optimizer, loss and metrics.
model.compile(
    optimizer=sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Train the model.
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the test set.
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('test accuracy', accuracy)

# Evaluate on the training set (to compare train/test accuracy).
loss, accuracy = model.evaluate(x_train, y_train)
print('train loss', loss)
print('train accuracy', accuracy)

x_shape: (60000, 28, 28)
y_shape: (60000,)
test loss 0.107040329377
test accuracy 0.9692
train accuracy 0.97735

2.2 正则化
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.regularizers import l2

# Load the MNIST data set.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# Flatten and normalize: (60000, 28, 28) -> (60000, 784), pixels scaled to [0, 1].
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Convert integer labels to one-hot vectors.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Model: 784-200-100-10 with L2 weight regularization on every layer.
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer='one', activation='tanh',
          kernel_regularizer=l2(0.0003)),
    Dense(units=100, bias_initializer='one', activation='tanh',
          kernel_regularizer=l2(0.0003)),
    Dense(units=10, bias_initializer='one', activation='softmax',
          kernel_regularizer=l2(0.0003))
])

# SGD optimizer, learning rate 0.2.
sgd = SGD(lr=0.2)

# Compile with optimizer, loss and metrics.
model.compile(
    optimizer=sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Train the model.
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the test set.
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('test accuracy', accuracy)

# Evaluate on the training set (to compare train/test accuracy).
loss, accuracy = model.evaluate(x_train, y_train)
print('train loss', loss)
print('train accuracy', accuracy)

x_shape: (60000, 28, 28)
y_shape: (60000,)
test loss 0.164071698987
test accuracy 0.9768
train accuracy 0.98785

3 CNN

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam

# Load the MNIST data set.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Reshape to NHWC with a single channel: (60000, 28, 28) -> (60000, 28, 28, 1),
# and normalize pixels to [0, 1].
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0

# Convert integer labels to one-hot vectors.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Sequential model.
model = Sequential()

# First convolutional layer.
model.add(Convolution2D(
    input_shape=(28, 28, 1),
    filters=32,       # number of convolution kernels
    kernel_size=5,    # convolution window size
    strides=1,
    padding='same',
    activation='relu'
))
# First pooling layer.
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
))

# Second convolutional layer.
model.add(Convolution2D(64, 5, strides=1, padding='same', activation='relu'))
# Second pooling layer.
model.add(MaxPooling2D(2, 2, 'same'))
# Flatten the pooled feature maps to a 1-D vector.
model.add(Flatten())

# First fully connected layer.
model.add(Dense(1024, activation='relu'))
# Dropout to reduce overfitting.
model.add(Dropout(0.5))
# Output layer.
model.add(Dense(10, activation='softmax'))

# Adam optimizer.
adam = Adam(lr=1e-4)

# Compile with optimizer, loss and metrics.
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model.
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate on the test set.  (In the original text this line was fused with the
# print statement, which is a SyntaxError.)
loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)

test loss 0.022989032609
test accuracy 0.9923

4 RNN

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN
from keras.optimizers import Adam

input_size = 28  # columns: pixels per row fed as one time step
time_steps = 28  # rows: number of time steps per image
cell_size = 50   # number of hidden units in the RNN cell

# Load the MNIST data set.
# BUG FIX: in the original text this call was swallowed into the comment on the
# cell_size line, so x_train/y_train were never defined.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Normalize pixels to [0, 1]; images are kept as (28, 28) sequences.
x_train = x_train / 255.0
x_test = x_test / 255.0

# Convert integer labels to one-hot vectors.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Build the model.
model = Sequential()
# Recurrent layer: reads each image row-by-row.
model.add(SimpleRNN(
    units=cell_size,                        # output dimension
    input_shape=(time_steps, input_size),   # input: 28 steps of 28 features
))
# Output layer.
model.add(Dense(10, activation='softmax'))

# Adam optimizer.
adam = Adam(lr=1e-4)

# Compile with optimizer, loss and metrics.
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model.
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate on the test set.
loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)

test loss 0.309194719088
test accuracy 0.9134

5 模型保存与载入

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense
from keras.optimizers import SGD

# Load the MNIST data set.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# Flatten and normalize: (60000, 28, 28) -> (60000, 784), pixels scaled to [0, 1].
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0

# Convert integer labels to one-hot vectors.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Load a previously saved model (architecture + weights + optimizer state).
model = load_model('model.h5')

# Continue training the loaded model.
model.fit(x_train, y_train, batch_size=64, epochs=2)

# Evaluate on the test set.  (In the original text this line was fused with the
# print statement, which is a SyntaxError.)
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

# Save the (re)trained model to a new HDF5 file.
model.save('my_model.h5')

test loss 0.0154596293474
accuracy 0.9065

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值