Training and Testing a Keras Model with Your Own Dataset

This post walks through a deep-learning image classification pipeline built with Keras and TensorFlow, covering everything from image preprocessing to model construction, training, and evaluation. The experiment uses image datasets captured at different distances and shows how a convolutional neural network (CNN) extracts features and classifies the images, ultimately reaching a fairly high classification accuracy.
from PIL import Image
import matplotlib.pyplot as plt
import os
from keras.preprocessing.image import img_to_array
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Dropout, Flatten
import tensorflow as tf
from keras import backend as K

# Data containers, class count, and image dimensions
X_train = []
Y_train = []
X_test = []
Y_test = []
classes = 2          # two classes: the 1.5 and 1.75 distance sets
row, col = 100, 100  # image height and width in pixels
count0 = 0           # correct predictions for class 1 (used in the manual check below)
count1 = 0           # correct predictions for class 0 (used in the manual check below)
# Paths to the raw images and to the resized train/test images
locationPicPath1='C:\\Users\\Administrator\\Desktop\\shibie\\cnnownpic\\cordova1\\'
locationSavedPicPath1='D:/datadistance/1.5_1/'
locationSavedPicTestPathOnePointFive='D:/datadistance/1.5_1test/'


locationPicPath2='C:\\Users\\Administrator\\Desktop\\shibie\\cnnownpic\\cordova2\\'
locationSavedPicPath2='D:/datadistance/1.75_1/'
locationSavedPicTestPathOnePointSevenFive='D:/datadistance/1.75_1test/'

# Put the raw image files of the first class into a list
listingPic1 = os.listdir(locationPicPath1)
# Resize the raw images and save them (commented out; only needs to run once to create the resized copies)
# for file in listingPic1:
#     if file != "":
#         img = Image.open(locationPicPath1+file)
#         resizeImg = img.resize((row, col))  # resize to row x col
#         resizeImg.save(locationSavedPicPath1+file)


listingPic2 = os.listdir(locationPicPath2)
# Resize the raw images of the second class and save them (commented out, same as above)
# for file in listingPic2:
#     if file != "":
#         img = Image.open(locationPicPath2 + file)
#         resizeImg = img.resize((row, col))  # resize to row x col
#         resizeImg.save(locationSavedPicPath2 + file)


# Put the resized images into the training lists (1.5 set, label 0)
listingSavedPic1 = os.listdir(locationSavedPicPath1)
for file in listingSavedPic1:
    if file != "":
        img = Image.open(locationSavedPicPath1+file)
        x = img_to_array(img)
        X_train.append(x)
        Y_train.append(0)
print("1.5训练集加载完成")


listingSavedPic2 = os.listdir(locationSavedPicPath2)
for file in listingSavedPic2:
    if file != "":
        img = Image.open(locationSavedPicPath2+file)
        x = img_to_array(img)
        X_train.append(x)
        Y_train.append(1)
print("1.75训练集加载完成")

listingSavedPicTest2 = os.listdir(locationSavedPicTestPathOnePointSevenFive)
for file in listingSavedPicTest2:
    if file != "":
        img = Image.open(locationSavedPicTestPathOnePointSevenFive+file)
        x = img_to_array(img)
        X_test.append(x)
        Y_test.append(1)
print("1.75测试集加载完成")

listingSavedPicTest1 = os.listdir(locationSavedPicTestPathOnePointFive)
for file in listingSavedPicTest1:
    if file != "":
        img = Image.open(locationSavedPicTestPathOnePointFive+file)
        x = img_to_array(img)
        X_test.append(x)
        Y_test.append(0)
print("1.5测试集加载完成")







total_input = len(X_train)
print("Total Train Data : %d" %total_input)
X_train = np.array(X_train)
X_train = X_train.reshape(total_input, row, col, 3)
X_train = X_train.astype('float32')
X_train /= 255
Y_train = np.array(Y_train)
# One-hot encode the training labels
Y_train = keras.utils.to_categorical(Y_train, classes)
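# Quick sanity check of what to_categorical produces (not shown in the original post):
# keras.utils.to_categorical([0, 1], 2) -> [[1., 0.], [0., 1.]]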

total_input_test = len(X_test)
X_test = np.array(X_test)
X_test = X_test.reshape(total_input_test, row, col, 3)
X_test = X_test.astype('float32')
X_test /= 255
Y_test = np.array(Y_test)
# One-hot encode the test labels
Y_test = keras.utils.to_categorical(Y_test, classes)
# Model hyperparameters


input_size = row * col # 100*100
batch_size = 64
hidden_neurons = 30
epochs = 6
'''
The following lines work around this error that can appear on some GPU setups:
could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR
'''
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
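# Note: ConfigProto/Session is the TensorFlow 1.x API used in this post. On TensorFlow 2.x
# (an assumption, not the version used here) the rough equivalent of allow_growth would be:
#   for gpu in tf.config.experimental.list_physical_devices('GPU'):
#       tf.config.experimental.set_memory_growth(gpu, True)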

# Build the network layer by layer
model = Sequential()
model.add(Convolution2D(32, (2, 2), input_shape=(row, col, 3)))  # 3-channel input, 32 filters of size 2x2
model.add(Activation('relu'))
model.add(Convolution2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(hidden_neurons))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation('softmax'))
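# Shape check, derived by hand (compare against model.summary() below): each 2x2 convolution
# with the default 'valid' padding shrinks the feature map by 1 pixel, 100 -> 99 -> 98 -> 97 -> 96,
# and the 2x2 max pooling halves it to 48x48. Flatten therefore yields 48 * 48 * 32 = 73728
# features feeding the Dense(hidden_neurons) layer.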

# Print the model structure
model.summary()

model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adadelta')
# verbose controls logging: 0 = silent, 1 = progress bar, 2 = one line per epoch
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_test,Y_test), verbose=1)

score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])


# Manually check the accuracy and plot the training curves
predict_test = model.predict(X_test, batch_size=64, verbose=1)
predict = np.argmax(predict_test, axis=1)
print(predict)

# 875 is the hard-coded size of the first test block: the 1.75 images (label 1) were loaded
# into X_test first, so indices 0-874 should predict 1 and the remaining 1.5 images should predict 0
for i in range(len(predict)):
    if i <= 874:
        if predict[i] == 1:
            count0 = count0 + 1  # correct prediction for class 1
    else:
        if predict[i] == 0:
            count1 = count1 + 1  # correct prediction for class 0
print("test_accuracy", (count0 + count1) / len(predict))

print(history.history.keys())
acclist = history.history['acc']
print('max accuracy->', max(acclist))
plt.figure(1)
plt.plot(history.history['acc'])
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.title('accuracy')
plt.figure(2)
plt.plot(history.history['loss'])
plt.ylabel('loss')
plt.xlabel('epoch')
plt.title('loss')
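# Sketch (assuming this Keras version, where the history keys are 'val_acc' and 'val_loss'):
# because validation_data was passed to fit(), the history also records validation metrics,
# which can be plotted the same way before calling plt.show().
plt.figure(3)
plt.plot(history.history['val_acc'])
plt.plot(history.history['val_loss'])
plt.ylabel('value')
plt.xlabel('epoch')
plt.title('validation accuracy / loss')
plt.legend(['val_acc', 'val_loss'])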
plt.show()