model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)))
32 is the number of channels, i.e. the number of feature maps; usually 32 or 64 (32 in the first layer, 64 in later layers, then flatten)
Pooling does not change the number of feature maps; it only shrinks the height and width of the image
# Repeat conv-pool blocks several times, then flatten (complete sketch below)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
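A complete minimal stack following this pattern, as a sketch (assumes a 28x28 grayscale input such as MNIST; the shapes in the comments are the resulting feature-map sizes):
from keras import layers, models
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)))  # -> (26,26,32)
model.add(layers.MaxPooling2D((2,2)))                    # -> (13,13,32)
model.add(layers.Conv2D(64, (3,3), activation='relu'))   # -> (11,11,64)
model.add(layers.MaxPooling2D((2,2)))                    # -> (5,5,64)
model.add(layers.Conv2D(64, (3,3), activation='relu'))   # -> (3,3,64)
model.add(layers.Flatten())                              # -> (576,)
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))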
## Functional API: treating each layer as a function on tensors
# Each layer acts as a function that takes a tensor and returns a tensor
from keras.models import Sequential, Model
from keras import layers
from keras import Input
seq_model = Sequential()
seq_model.add(layers.Dense(32,activation='relu',input_shape=(64,)))
seq_model.add(layers.Dense(32,activation='relu'))
seq_model.add(layers.Dense(10,activation='softmax'))
# The equivalent functional API implementation
input_tensor = Input(shape=(64,))
x = layers.Dense(32,activation='relu')(input_tensor)
x = layers.Dense(32,activation='relu')(x)
output_tensor = layers.Dense(10,activation='softmax')(x)
# The Model class turns an input tensor and an output tensor into a model
model = Model(input_tensor,output_tensor)
# Inspect the model; summary() prints the architecture directly, so wrapping it in print() is unnecessary
model.summary()
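Training a functional model works the same way as a Sequential one. A minimal sketch, assuming random NumPy data of the right shapes purely to make the calls runnable (x_train/y_train are illustrative names):
import numpy as np
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# random data matching the (64,) input and 10-way softmax output
x_train = np.random.random((1000, 64))
y_train = np.random.random((1000, 10))
model.fit(x_train, y_train, epochs=10, batch_size=128)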
## Multi-input networks
from keras.models import Model
from keras import layers
from keras import Input
text_vocabulary_size = 10000
question_vocabulary_size = 10000
answer_vocabulary_size = 500
# The text input is a variable-length sequence of integers; an input can optionally be given a name
text_input = Input(shape=(None,),dtype ='int32', name='text')
# Embed the text input into a sequence of 64-dimensional vectors
embedded_text = layers.Embedding(text_vocabulary_size, 64)(text_input)
# Encode the sequence into a single vector with an LSTM
encoded_text = layers.LSTM(32)(embedded_text)
# Process the question the same way (with separate layer instances)
question_input = Input(shape=(None,),dtype='int32',name='question')
embedded_question = layers.Embedding(question_vocabulary_size, 32)(question_input)
encoded_question = layers.LSTM(16)(embedded_question)
# Concatenate the encoded question and the encoded text
concatenated = layers.concatenate([encoded_text,encoded_question],axis=-1)
# Add a softmax classifier on top
answer = layers.Dense(answer_vocabulary_size,activation='softmax')(concatenated)
# When instantiating the model, specify the two inputs and the one output
model = Model([text_input,question_input],answer)
model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['acc'])
......
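Before calling fit, text, question, and answers must be actual arrays. A minimal sketch that fabricates random dummy data so the calls below run (num_samples and max_length are arbitrary illustrative values):
import numpy as np
from keras.utils import to_categorical
num_samples = 1000
max_length = 100
# random integer sequences standing in for tokenized text and questions
text = np.random.randint(1, text_vocabulary_size, size=(num_samples, max_length))
question = np.random.randint(1, question_vocabulary_size, size=(num_samples, max_length))
# one-hot answers, to match categorical_crossentropy
answers = np.random.randint(answer_vocabulary_size, size=(num_samples,))
answers = to_categorical(answers, answer_vocabulary_size)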
# Fitting
## As a list
model.fit([text,question],answers,epochs=10,batch_size=128)
## As a dict (requires the inputs to be named)
model.fit({'text':text,'question':question},answers,epochs=10,batch_size=128)
## Multi-output networks
from keras.models import Model
from keras import layers
from keras import Input
vocabulary_size = 50000
num_income_groups = 10
posts_input = Input(shape=(None,),dtype='int32',name='post')
embedded_posts = layers.Embedding(vocabulary_size,256)(posts_input)
x = layers.Conv1D(128,5,activation='relu')(embedded_posts)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256,5,activation='relu')(x)
x = layers.Conv1D(256,5,activation='relu')(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256,5,activation='relu')(x)
x = layers.Conv1D(256,5,activation='relu')(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128,activation='relu')(x)
# Output layers, each with its own name
age_prediction = layers.Dense(1,name='age')(x)
income_prediction = layers.Dense(num_income_groups,activation='softmax',name='income')(x)
gender_prediction = layers.Dense(1,activation='sigmoid',name='gender')(x)
model = Model(posts_input,[age_prediction,income_prediction,gender_prediction])
# 1. Compilation options for a multi-output model: multiple losses
## As a list
model.compile(optimizer='rmsprop',loss=['mse','categorical_crossentropy','binary_crossentropy'])
## As a dict (requires the outputs to be named)
model.compile(optimizer='rmsprop',loss={'age':'mse','income':'categorical_crossentropy','gender':'binary_crossentropy'})
# 2. Compilation options for a multi-output model: loss weighting
## As a list
model.compile(optimizer='rmsprop',
loss=['mse','categorical_crossentropy','binary_crossentropy'],
loss_weights=[0.25, 1., 10.])
## As a dict (requires the outputs to be named)
model.compile(optimizer='rmsprop',
loss={'age':'mse','income':'categorical_crossentropy','gender':'binary_crossentropy'},
loss_weights={'age':0.25,'income':1.,'gender':10.})
# Fitting
## As a list
model.fit(posts,[age_targets,income_targets,gender_targets],epochs=10,batch_size=128)
## As a dict, analogous to the above (spelled out below)
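For completeness, the dict form of the fit call (assuming age_targets, income_targets, and gender_targets are NumPy arrays, with income_targets one-hot encoded to match categorical_crossentropy):
model.fit(posts,{'age':age_targets,'income':income_targets,'gender':gender_targets},epochs=10,batch_size=128)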