Convolutional neural networks with Keras

Building the network

from keras import datasets, layers, models
from keras.preprocessing.image import ImageDataGenerator

model = models.Sequential()  #define a sequential model
#first conv layer: set the activation function and the input image size here
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(300, 300, 3)))
#model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))  #variant for 32x32 inputs such as CIFAR-10
# with a 32x32 input, this 3x3 conv yields 32 feature maps of size 30x30
model.add(layers.MaxPooling2D((2, 2)))  #max pooling: 30x30 -> 15x15
#later conv layers only adjust the filter count; they take their input
#shape from the previous layer, so no input_shape is needed
model.add(layers.Conv2D(64, (3, 3), activation='relu'))

model.add(layers.MaxPooling2D((2, 2)))  # 13x13 -> 6x6
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
#optionally insert one more block here:
#model.add(layers.MaxPooling2D((2, 2)))
#model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))  # 4x4 -> 2x2
model.add(layers.Conv2D(256, (2, 2), activation='relu'))


model.add(layers.Flatten())  #flatten the feature maps into a 1-D vector
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(100, activation='softmax'))  #map to 100 output classes (CIFAR-10 would need 10)
model.summary()  #print the model architecture
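# quick sanity check of the spatial sizes annotated above (32x32 variant):
# a 'valid' conv or pool outputs floor((n - k) / s) + 1 along each axis;
# the helper name feature_size is ours, not part of Keras
def feature_size(n, k, s=1):
    return (n - k) // s + 1

n = feature_size(32, 3)      # conv 3x3: 32 -> 30
n = feature_size(n, 2, s=2)  # pool 2x2: 30 -> 15
n = feature_size(n, 3)       # conv 3x3: 15 -> 13
n = feature_size(n, 2, s=2)  # pool 2x2: 13 -> 6
n = feature_size(n, 3)       # conv 3x3: 6 -> 4
n = feature_size(n, 2, s=2)  # pool 2x2: 4 -> 2, matching the annotations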
#train_generator / validation_generator
train_dir  = ''
validation_dir = ''
#binary classification: each directory contains two subdirectories, one per class

train_datagen = ImageDataGenerator(rescale=1./255)  # rescale raw pixel values by 1/255
test_datagen = ImageDataGenerator(rescale=1./255)
# generate batches of preprocessed data from the given directory
# target_size: the size every image is resized to
# each batch from the generator is a 20x300x300x3 tensor plus binary labels (shape (20,))
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(300, 300),
                                                    batch_size=20,
                                                    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(300, 300),
                                                        batch_size=20,
                                                        class_mode='binary')
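# flow_from_directory infers the labels from subdirectory names; the expected
# layout (cats/dogs are hypothetical class names) is:
#   train_dir/
#       cats/   cat001.jpg ...
#       dogs/   dog001.jpg ...
# the resulting name-to-index mapping can be inspected with:
#print(train_generator.class_indices)  # e.g. {'cats': 0, 'dogs': 1}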

#There is also a simpler way to get the data: import a built-in dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()  #the built-in data is already curated, so training is much faster
# note: CIFAR-10 images are 32x32x3, so build the model with the
# input_shape=(32, 32, 3) variant shown above

#train_images = train_images.reshape((60000,28,28))  #image count and size (MNIST variant)
#test_images = test_images.reshape((10000,28,28))

train_images , test_images = train_images / 255.0 , test_images / 255.0  #map the pixel values into [0, 1]

# a model must be compiled before it can be trained
model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
'''
The optimizer is adam, a variant of gradient descent;
the loss is the (sparse) cross-entropy loss.
'''
model.fit(train_images, train_labels, epochs=100)  #train for the given number of epochs
#alternatively, load previously saved weights instead of training from scratch
model.load_weights("./cats_and_dogs_small_1.h5")
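# sparse_categorical_crossentropy expects integer class labels, which is what
# cifar10.load_data() returns; one-hot labels would need categorical_crossentropy
# instead. A minimal illustration of the two label formats:
from keras.utils import to_categorical
print(train_labels[:3].flatten())            # integer labels, e.g. [6 9 9]
print(to_categorical(train_labels[:3], 10))  # the same labels as one-hot rows, shape (3, 10)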

history = model.fit_generator(train_generator,
                              steps_per_epoch=100,   # batches drawn from the generator per epoch
                              epochs=10,
                              validation_data=validation_generator,
                              validation_steps=50)   # batches used for validation after each epoch
model.save('vin.h5')  # save the full model (architecture + weights) to disk
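Once saved, the model can be restored and used for prediction without rebuilding it. A minimal sketch, assuming a 300x300 RGB image at the hypothetical path sample.jpg:

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('vin.h5')  # restores architecture, weights and compile state
img = image.load_img('sample.jpg', target_size=(300, 300))  # hypothetical test image
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)  # same 1/255 rescaling as training
print(model.predict(x))  # per-class probabilities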

Evaluation

score = model.evaluate_generator(validation_generator,steps=1)
print("Validation %s: %.2f%%" % (model.metrics_names[1], score[1] * 100))

######################################################################
test_loss, test_acc = model.evaluate(test_images, test_labels)  # evaluate on the CIFAR-10 test set
print(test_acc)
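For individual predictions, model.predict returns one probability row per image, and the predicted class is its argmax. A minimal sketch:

import numpy as np

probs = model.predict(test_images[:5])  # shape (5, num_classes)
print(np.argmax(probs, axis=1))         # predicted class indices
print(test_labels[:5].flatten())        # ground-truth labels for comparison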

Complete code for training a binary classification problem with the VGG16 network:

import numpy as np
import tensorflow as tf
from keras import Model, Sequential
from keras.applications.imagenet_utils import (decode_predictions,
                                               preprocess_input)
from keras.layers import (Conv2D, Dense, Flatten, GlobalAveragePooling2D,
                          GlobalMaxPooling2D, Input, MaxPooling2D)
from keras.preprocessing import image
from keras.utils.data_utils import get_file
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator
#WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
#WEIGHTS_PATH = "./vgg16_weights_tf_dim_ordering_tf_kernels.h5"


def VGG16(num_classes):
    image_input = Input(shape = (300,300,3))

    # Block 1
    # 300,300,3 -> 150,150,64
    x = Conv2D(64,(3,3),activation = 'relu',padding = 'same',name = 'block1_conv1')(image_input)
    x = Conv2D(64,(3,3),activation = 'relu',padding = 'same', name = 'block1_conv2')(x)
    x = MaxPooling2D((2,2), strides = (2,2), name = 'block1_pool')(x)

    # Block 2
    # 150,150,64 -> 75,75,128
    x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv1')(x)
    x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv2')(x)
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block2_pool')(x)

    # Block 3
    # 75,75,128 -> 37,37,256
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv1')(x)
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv2')(x)
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv3')(x)
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block3_pool')(x)

    # Block 4
    # 37,37,256 -> 18,18,512
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv1')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv2')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv3')(x)
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block4_pool')(x)

    # Block 5
    # 18,18,512 -> 9,9,512
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block5_conv1')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block5_conv2')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block5_conv3')(x)    
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block5_pool')(x)

    # Classifier head
    # 9,9,512 -> 41472 -> 4096 -> 4096 -> num_classes
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(num_classes, activation='softmax', name='predictions')(x)
    
    model = Model(image_input,x,name = 'vgg16')
    return model

if __name__ == '__main__':
    model = VGG16(2)
    # the commented lines below fetch/load the ImageNet weights; note that the
    # full-model weights only fit the original 224x224 / 1000-class configuration,
    # not this 300x300 / 2-class variant
    #weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models')
    #model.load_weights(WEIGHTS_PATH)
    train_dir = 'D:/program/keras/vin_'
    validation_dir = 'D:/program/keras/vin_'

    train_datagen = ImageDataGenerator(rescale=1. / 255)  # rescale raw pixel values by 1/255
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    # generate batches of preprocessed data from the given directory
    # target_size: the size every image is resized to
    # each batch is a 20x300x300x3 tensor plus binary labels (shape (20,))
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(300, 300),
                                                        batch_size=20,
                                                        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                            target_size=(300, 300),
                                                            batch_size=20,
                                                            class_mode='binary')
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    '''
    The optimizer is adam, a variant of gradient descent;
    the loss is the (sparse) cross-entropy loss.
    '''

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=100,
                                  epochs=10,
                                  validation_data=validation_generator,
                                  validation_steps=50)
    model.save('vin.h5')
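
    # history.history stores one value per epoch for each metric; a minimal
    # plotting sketch (assumes matplotlib is installed; the key is 'accuracy'
    # on recent Keras versions, 'acc' on older ones)
    import matplotlib.pyplot as plt
    plt.plot(history.history['accuracy'], label='train')
    plt.plot(history.history['val_accuracy'], label='validation')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()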


    #test on a single image (note: decode_predictions only maps ImageNet's
    #1000 class indices to names, so for this 2-class model print the raw
    #probabilities instead)
    #img_path = 'elephant.jpg'
    #img = image.load_img(img_path, target_size=(300, 300))
    #x = image.img_to_array(img)
    #x = np.expand_dims(x, axis=0)
    #x = x / 255.0  # match the generator's 1/255 rescaling
    #print('Input image shape:', x.shape)


    #preds = model.predict(x)
    #print('Predicted:', preds)
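
The hand-built network above makes the VGG16 architecture explicit. For reference, the same convolutional backbone can also be obtained from keras.applications, which makes it easy to reuse the ImageNet filter weights even at a 300x300 input size (unlike the full-model weights, which are tied to the 224x224 / 1000-class head). A minimal transfer-learning sketch; the 256-unit head is our choice, not part of the original code:

from keras import Model, layers
from keras.applications import VGG16 as KerasVGG16

# convolutional base only: include_top=False drops the fc head, so the
# ImageNet weights fit any input size >= 32x32
base = KerasVGG16(weights='imagenet', include_top=False, input_shape=(300, 300, 3))
base.trainable = False                       # freeze the pretrained filters

x = layers.Flatten()(base.output)
x = layers.Dense(256, activation='relu')(x)  # small custom head (our choice)
outputs = layers.Dense(2, activation='softmax')(x)
model = Model(base.input, outputs, name='vgg16_transfer')
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])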
