keras 简单的分类实例

这段程序是使用keras进行深度学习分类问题。比较简单:将每个类别的图片单独保存在一个文件夹中,然后把包含所有类别的文件夹再放在同一个文件夹下面。比如下面所示:

train/

        cat/

        dog/

下面的代码写的是9分类的问题。

代码:

from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
import keras
from keras.preprocessing.image import  ImageDataGenerator
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.optimizers import SGD
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D
from keras.applications.inception_v3 import InceptionV3
from keras import metrics
from keras.optimizers import Adam 



import math
import os
import numpy as np 
import pandas as pd
import glob
from tqdm import tqdm
import cv2

# --- Training configuration ---------------------------------------------
num_classes = 9     # number of class sub-folders under data/train
image_height = 139  # input image height fed to the network
image_weight = 139  # input image width (NOTE(review): "weight" is presumably a typo for "width")
batch_size = 16     # batch size used to compute steps_per_epoch below
epochs = 50         # number of training epochs

def train_gen():
    """Build the augmented training data generator.

    Reads class-labelled images from 'data/train' (one sub-folder per
    class) and yields rescaled, lightly augmented batches with one-hot
    ('categorical') labels.

    Returns:
        A DirectoryIterator yielding (images, labels) batches of
        shape (batch_size, image_height, image_weight, 3).
    """
    train_datagen = ImageDataGenerator(
            rescale=1./255,       # scale pixels to [0, 1]
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True)
    train_generator = train_datagen.flow_from_directory(
        'data/train',
        target_size=(image_height, image_weight),
        # Bug fix: was hard-coded to 32, while steps_per_epoch is computed
        # from the module-level batch_size (16) -- each "epoch" therefore
        # covered ~2x the dataset. Use the shared constant instead.
        batch_size=batch_size,
        class_mode='categorical')

    return train_generator


def test_gen():
    """Build the evaluation data generator.

    Reads class-labelled images from './data/test' (one sub-folder per
    class). Only rescaling is applied -- no augmentation at test time.

    Returns:
        A DirectoryIterator yielding (images, labels) batches with
        one-hot ('categorical') labels.
    """
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
            './data/test',
            target_size=(image_height, image_weight),
            # Consistency fix: was hard-coded to 32; use the shared
            # module-level batch_size like the rest of the script.
            batch_size=batch_size,
            class_mode='categorical')
    return test_generator

# 计算训练集的数量
def train_num_samples(path='./data/train'):
    """Count the '*.jpg' training images across all class sub-folders.

    The directory layout is one sub-folder per class (9 classes in this
    project), each containing that class's images.

    Args:
        path: Root directory holding the class sub-folders.
              Defaults to './data/train' for backward compatibility.

    Returns:
        Total number of '*.jpg' files found one level below `path`.
    """
    return sum(
        len(glob.glob(os.path.join(path, entry, '*.jpg')))
        for entry in os.listdir(path))

train_generator = train_gen()


def create_model(input_shape, n_out):
    """Assemble an ImageNet-pretrained InceptionV3 classifier.

    The raw input is batch-normalized, run through the headless
    InceptionV3 backbone, then through a small 1x1-conv + dense head
    ending in an `n_out`-way softmax.

    Args:
        input_shape: (height, width, channels) of the input images.
        n_out: Number of output classes.

    Returns:
        An uncompiled keras Model mapping images to class probabilities.
    """
    images = Input(shape=input_shape)
    backbone = InceptionV3(include_top=False,
                           weights='imagenet',
                           input_shape=input_shape)

    features = backbone(BatchNormalization()(images))
    features = Conv2D(32, kernel_size=(1,1), activation='relu')(features)
    features = Flatten()(features)
    features = Dropout(0.5)(features)
    features = Dense(1024, activation='relu')(features)
    features = Dropout(0.5)(features)
    probabilities = Dense(n_out, activation='softmax')(features)

    return Model(images, probabilities)

# --- Model construction and training ------------------------------------

# Build the classifier on top of the pretrained InceptionV3 backbone
# ("warm up" phase: the whole graph is trained with a small LR).
model = create_model(
    input_shape=(image_height,image_weight,3), 
    n_out=num_classes)

# SGD with a low learning rate so the pretrained weights are fine-tuned
# rather than destroyed.
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics = ['accuracy'])


# Save the weights whenever training loss improves, and shrink the
# learning rate after the loss plateaus for 15 epochs.
model_checkpoint = ModelCheckpoint('./model.hdf5', monitor='loss', save_best_only=True, mode='auto', verbose = 1)
reduce_lr = ReduceLROnPlateau(monitor='loss', patience=15, mode='auto') 

model_callbacks = [model_checkpoint, reduce_lr]
# NOTE(review): confirm that the batch size used in steps_per_epoch
# matches the batch size the generator actually yields.
model.fit_generator(
        train_generator,
        steps_per_epoch=math.ceil( train_num_samples() / batch_size),
        epochs=epochs,
        verbose = 1,
        callbacks = model_callbacks)



# --- Inference -----------------------------------------------------------
# Restore the best weights saved by the ModelCheckpoint callback above.
model.load_weights('./model.hdf5')

def predict(MODEL):
    """Score every './data/test/*.jpg' image and write './result.csv'.

    Each CSV row holds the image file name ('ID') followed by the nine
    per-class softmax probabilities in columns '001'..'009'.

    Args:
        MODEL: A keras model accepting a (1, image_height, image_weight, 3)
               float array of pixel values scaled to [0, 1].
    """
    paths = glob.glob(os.path.join('./data/test', '*.jpg'))

    ids = []
    # One probability list per class, keyed by the output column name.
    columns = ['%03d' % (c + 1) for c in range(num_classes)]
    probs = {col: [] for col in columns}

    for path in tqdm(paths):
        img = cv2.imread(path)
        # Bug fix: cv2 loads images as BGR, but training images went
        # through ImageDataGenerator (PIL) as RGB -- convert so the
        # channel order matches what the model was trained on.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (image_height, image_weight))
        pre = MODEL.predict(np.expand_dims(img/255., 0))[0]

        ids.append(path.split(os.sep)[-1])
        for col, p in zip(columns, pre):
            probs[col].append(p)

    # 'ID' first, then '001'..'009' -- same column order as before.
    d = {'ID': ids}
    d.update(probs)
    df = pd.DataFrame(d)
    df.to_csv('./result.csv', index = False, encoding='utf-8')
    

predict(model)

 

  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值