A Quick Overview of Keras

Keras Basics

First, install TensorFlow or another backend supported by Keras (e.g. pip install tensorflow).

Link to the official Keras documentation in Chinese: Keras中文说明文档

Once Keras is in place, building a network architecture becomes very convenient.

Linear regression with Keras

Let's start with an example that shows how convenient Keras is. Below is a linear regression implemented in Keras:

import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense

# Generate 100 random points on a noisy line y = 0.1x + 0.2
x_data = np.random.rand(100)
noise = np.random.normal(0,0.01,x_data.shape)
y_data = x_data * 0.1 + 0.2 + noise

# Define the model
model = Sequential()
model.add(Dense(units=1,input_dim=1))
# sgd: stochastic gradient descent; mse: mean squared error
model.compile(optimizer='sgd',loss='mse')

# Train, one full-batch gradient step at a time
for step in range(3001):
    cost = model.train_on_batch(x_data,y_data)
    if step % 300 == 0:
        print('step:',step,';cost:',cost)

W, b = model.layers[0].get_weights()
print('W:',W,';b:',b)

# Predict
y_pred = model.predict(x_data)

# Plot
plt.scatter(x_data,y_data)
plt.plot(x_data,y_pred,'r',lw=3)
plt.show()

The resulting plot is shown below; the printed W should come out close to the true slope 0.1, and b close to the true intercept 0.2.
[plot: scatter of the training points with the fitted line in red]

Notice that building the network takes only two lines:

model = Sequential() and model.add(Dense(units=1,input_dim=1))

Very convenient.
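If you want to double-check what those two lines produced, Keras can print the structure. A minimal sketch, assuming the model above is already defined:

# Print a layer-by-layer summary; the single Dense layer here has
# exactly 2 trainable parameters (one weight and one bias)
model.summary()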

Nonlinear regression with Keras

What about nonlinear data? Just add a few more layers and an activation function:

import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

# Generate 200 evenly spaced points on a noisy parabola
x_data = np.linspace(-0.5,0.5,200)
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise

# Define the model
model = Sequential()
model.add(Dense(units=10,input_dim=1))
# Activation can be added as a separate layer or via Dense's activation argument
model.add(Activation('tanh'))
model.add(Dense(units=1,activation='tanh'))
# Raise the learning rate from the SGD default of 0.01 to speed up training
sgd = SGD(lr=0.1)
model.compile(optimizer=sgd,loss='mse')

for step in range(6001):
    cost = model.train_on_batch(x_data,y_data)
    if step % 300 == 0:
        print('step:',step,'cost:',cost)
    
# Predict
y_pred = model.predict(x_data)

# Plot
plt.scatter(x_data,y_data)
plt.plot(x_data,y_pred,'r',lw=3)
plt.show()

The resulting plot is shown below. Note that tanh on the output layer only works here because the targets lie roughly in [0, 0.25]; for unbounded regression targets you would leave the final Dense layer linear.
[plot: scatter of the noisy parabola with the fitted red curve]
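The manual train_on_batch loop above can also be replaced by a single fit call, which the MNIST example below uses anyway. A minimal equivalent sketch for this model, assuming it is already compiled; verbose=0 just suppresses the per-epoch progress output:

# With batch_size=200 each epoch is one gradient step on the full
# 200-point batch, so 6000 epochs match the 6000 train_on_batch steps
model.fit(x_data, y_data, batch_size=200, epochs=6000, verbose=0)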

MNIST handwritten digit recognition

In this example the network is built a different way: instead of calling add repeatedly, the layers are written as a list passed straight to Sequential().

Training also switches from the single-batch train_on_batch method to the more convenient fit method.

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import np_utils
from keras.optimizers import SGD
from keras.regularizers import l2

# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preprocessing: flatten each 28x28 image into a 784-dim vector and scale to [0,1]
x_train = x_train.reshape(x_train.shape[0],-1)/255.
x_test = x_test.reshape(x_test.shape[0],-1)/255.
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train,num_classes=10)
y_test = np_utils.to_categorical(y_test,num_classes=10)

model = Sequential([
    # Arguments in order: units, input dimension, bias initializer, activation, L2 regularizer
    Dense(units=200,input_dim=784,bias_initializer='one',activation='relu',kernel_regularizer=l2(0.0003)),
    Dropout(0.4),
    Dense(units=100,bias_initializer='one',activation='relu'),
    Dropout(0.4),
    Dense(units=10,activation='softmax')
])

sgd = SGD(lr=0.2)

model.compile(
    optimizer = sgd,
    # cross-entropy loss
    loss = 'categorical_crossentropy',
    metrics = ['accuracy']
)

# Train
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate
loss, accuracy = model.evaluate(x_test,y_test)
print("loss:",loss)
print("accuracy:",accuracy)

Accuracy reaches about 96%.

Adding a regularizer to a layer, or inserting Dropout after a layer, both help prevent overfitting.
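One way to check whether the regularizer and Dropout are actually helping is to watch validation metrics during training; fit accepts a validation set directly. A minimal sketch reusing the model and data above (the exact metric key names, e.g. 'acc' vs 'accuracy', depend on the Keras version):

# Report test-set loss/accuracy after every epoch: a widening gap
# between training and validation accuracy is a sign of overfitting
history = model.fit(x_train, y_train,
                    batch_size=32,
                    epochs=10,
                    validation_data=(x_test, y_test))
print(history.history.keys())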

Implementing a CNN in Keras

The convolution and pooling layers are already implemented; you just use them the same way you add a Dense layer.

Using the same MNIST example, a CNN does much better on images, with accuracy around 99%:

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Convolution2D, MaxPool2D, Flatten, Dropout
from keras.utils import np_utils
from keras.optimizers import Adam

# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preprocessing: reshape to (samples, height, width, channels) and scale to [0,1]
x_train = x_train.reshape(-1,28,28,1)/255.
x_test = x_test.reshape(-1,28,28,1)/255.
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train,num_classes=10)
y_test = np_utils.to_categorical(y_test,num_classes=10)

model = Sequential()
# First conv layer: 32 5x5 filters; 'same' padding keeps the 28x28 size
model.add(Convolution2D(
    input_shape = (28,28,1),
    filters = 32,
    kernel_size=5,
    strides=1,
    padding='same',
    activation='relu'
))

# 2x2 max pooling halves each spatial dimension
model.add(MaxPool2D(
    pool_size=2,
    strides=2,
    padding='same'
))
# Second conv + pool block: output is 7x7x64, flattened to 3136 features
model.add(Convolution2D(64,5,strides=1,padding='same',activation='relu'))
model.add(MaxPool2D(2,2,'same'))
model.add(Flatten())
# Fully connected classifier head with Dropout against overfitting
model.add(Dense(1024,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))

adam = Adam(lr=0.001)

model.compile(
    optimizer = adam,
    loss = 'categorical_crossentropy',
    metrics = ['accuracy']
)

# Train
model.fit(x_train,y_train,batch_size=64,epochs=4)

# Evaluate
loss, accuracy = model.evaluate(x_test,y_test)
print("loss:",loss)
print("accuracy:",accuracy)

Implementing an LSTM in Keras

The LSTM layer is also ready-made; just call it and you're done:

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.utils import np_utils
from keras.optimizers import Adam

# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preprocessing: treat each 28x28 image as a sequence of 28 rows of 28 pixels
time_steps = 28  # sequence length (image rows)
input_size = 28  # features per step (pixels per row)
cell_size = 50   # LSTM hidden units

x_train = x_train/255.
x_test = x_test/255.
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train,num_classes=10)
y_test = np_utils.to_categorical(y_test,num_classes=10)

model = Sequential([
    LSTM(units=cell_size,input_shape=(time_steps,input_size)),
    Dense(10,activation='softmax')
])

adam = Adam(lr=0.001)

model.compile(
    optimizer = adam,
    loss = 'categorical_crossentropy',
    metrics = ['accuracy']
)

# Train
model.fit(x_train,y_train,batch_size=64,epochs=10)

# Evaluate
loss, accuracy = model.evaluate(x_test,y_test)
print("loss:",loss)
print("accuracy:",accuracy)

This example reaches about 97% accuracy.
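For a deeper recurrent model, stacked LSTM layers need return_sequences=True on every layer except the last, so the next LSTM still receives one vector per time step. A minimal sketch; the layer sizes are illustrative, not tuned:

from keras.models import Sequential
from keras.layers import LSTM, Dense

# Every LSTM except the last returns the full sequence of hidden states
stacked = Sequential([
    LSTM(64, return_sequences=True, input_shape=(28, 28)),
    LSTM(32),  # the final LSTM returns only its last hidden state
    Dense(10, activation='softmax')
])
stacked.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=['accuracy'])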

Saving and loading models

First install an extra library: pip install h5py

Save the model: model.save('lstm_mnist.h5')

Load the model:

from keras.models import load_model

model = load_model('lstm_mnist.h5')
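model.save stores the architecture, the weights, and the optimizer state in one file. The pieces can also be saved separately; a minimal sketch (the file name is illustrative):

from keras.models import model_from_json

# Weights only: loading them back requires a model with an identical architecture
model.save_weights('lstm_mnist_weights.h5')
model.load_weights('lstm_mnist_weights.h5')

# Architecture only, as a JSON string; rebuilds an untrained copy
json_string = model.to_json()
model2 = model_from_json(json_string)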
