Super-simple model building with Keras (keras 超简单建模)

#coding:utf-8
#classifier
import numpy as np
np.random.seed(1337) #for reproducibility
from keras import regularizers,callbacks
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential,Model,load_model
from keras.layers import Input,Dense,Activation,Convolution2D,Flatten,MaxPooling2D,BatchNormalization,Dropout
from keras.optimizers import RMSprop,Adam

#download the mnist to the path'/.keras/datasets/'
#X shape(60,000 28*28),y shape(10,000,)
# Load MNIST (downloaded to ~/.keras/datasets/ on first use).
# X_train: (60000, 28, 28) uint8 images; X_test: (10000, 28, 28);
# y_*: integer class labels in [0, 9].
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Reshape to (N, channels=1, 28, 28) for the channels_first conv layers
# below, and scale pixels to [0, 1]. Cast to float32 explicitly: uint8/255
# would otherwise produce float64 arrays, doubling memory for no benefit
# since the model computes in float32 anyway.
X_train = X_train.reshape(-1, 1, 28, 28).astype('float32') / 255
X_test = X_test.reshape(-1, 1, 28, 28).astype('float32') / 255

# One-hot encode the labels to match the categorical_crossentropy loss.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Functional-API CNN: two (conv -> batch-norm -> ReLU -> max-pool) stages,
# then a regularized dense layer with dropout and a 10-way softmax head.
# All spatial layers use channels_first to match the (1, 28, 28) input.
my_inputs = Input(shape=(1, 28, 28))

# Stage 1: 32 5x5 filters; 'same' padding keeps 28x28, pooling halves to 14x14.
# NOTE: the original passed batch_input_shape=(None, 1, 28, 28) here; that
# argument is redundant on a layer applied to an existing tensor — the Input
# layer above already fixes the input shape — so it has been removed.
x = Convolution2D(
    filters=32,
    kernel_size=5,
    strides=1,
    padding='same',
    data_format='channels_first',
)(my_inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
    data_format='channels_first',
)(x)

# Stage 2: 64 5x5 filters at 14x14; pooling halves to 7x7.
x = Convolution2D(
    filters=64,
    kernel_size=5,
    strides=1,
    padding='same',
    data_format='channels_first',
)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
    data_format='channels_first',
)(x)

# Classifier head: flatten the 64x7x7 feature maps, apply an L2
# weight-regularized / L1 activity-regularized dense layer, dropout,
# then a softmax over the 10 digit classes.
x = Flatten()(x)
x = Dense(1024, activation='relu',
          kernel_regularizer=regularizers.l2(0.0001),
          activity_regularizer=regularizers.l1(0.0001))(x)
x = Dropout(0.4)(x)
my_outputs = Dense(10, activation='softmax')(x)

model = Model(inputs=my_inputs, outputs=my_outputs)



# Compile with an explicitly constructed Adam optimizer so the learning
# rate can be set by hand, and track accuracy alongside the loss.
adam = Adam(lr=1e-4)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Inspect the built model: model.get_config() returns the per-layer
# configuration, model.summary() prints the overall structure.
config = model.get_config()
print(type(config), config)
for layer_cfg in config:
    print(layer_cfg)
print('model.summary:------')
model.summary()

print('Training ------------')
# Early stopping: halt once the monitored training accuracy ('acc' is the
# history key this Keras version records for the 'accuracy' metric —
# confirm if upgrading Keras) improves by less than 0.001 for 2 epochs.
early_stop = callbacks.EarlyStopping(min_delta=0.001, patience=2,
                                     monitor='acc', mode='max')
model.fit(X_train, y_train, epochs=10, batch_size=64,
          callbacks=[early_stop])

print('\nTesting ------------')
# Evaluate on the held-out test set with the compiled loss and metrics.
loss, accuracy = model.evaluate(X_test, y_test)

print('\ntest loss: ', loss)
print('\ntest accuracy: ', accuracy)

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值