Garbage Classification with a CNN, Reaching 90% Accuracy

Full code

Imports

import numpy as np
import cv2
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras.layers import Conv2D, Flatten, MaxPooling2D,Dense,Dropout,SpatialDropout2D
from keras.models  import Sequential
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img, array_to_img
import random,os,glob
import matplotlib.pyplot as plt

Loading the images

dir_path = './Garbage classification/Garbage classification'
img_list = glob.glob(os.path.join(dir_path, '*/*.jpg'))
len(img_list)
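
Before building anything, it helps to eyeball a few random samples. A minimal sketch using the imports above (the class name is just each file's parent folder):

# Show 6 random images as a quick sanity check of the dataset
fig, axes = plt.subplots(2, 3, figsize=(9, 6))
for ax in axes.flat:
    path = random.choice(img_list)
    ax.imshow(load_img(path, target_size=(300, 300)))
    ax.set_title(os.path.basename(os.path.dirname(path)))  # folder name = class
    ax.axis('off')
plt.tight_layout()
plt.show()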

ImageDataGenerator feeds the CNN a batch of randomly transformed images every epoch, which improves the model's robustness and generalization. Both generators below share validation_split=0.1 over the same directory, so the random augmentations (flips, shear, zoom, shifts) are applied only to the training subset, while the validation subset is merely rescaled.

train=ImageDataGenerator(horizontal_flip=True,
                         vertical_flip=True,
                         validation_split=0.1,
                         rescale=1./255,
                         shear_range = 0.1,
                         zoom_range = 0.1,
                         width_shift_range = 0.1,
                         height_shift_range = 0.1,)

test=ImageDataGenerator(rescale=1/255,
                        validation_split=0.1)

train_generator=train.flow_from_directory(dir_path,
                                          target_size=(300,300),
                                          batch_size=32,
                                          class_mode='categorical',
                                          subset='training')

test_generator=test.flow_from_directory(dir_path,
                                        target_size=(300,300),
                                        batch_size=32,
                                        class_mode='categorical',
                                        subset='validation')

labels = train_generator.class_indices              # class name -> index
idx_to_class = {v: k for k, v in labels.items()}    # index -> class name, used for predictions below
print(labels)
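
It is also worth checking how balanced the training subset is. A quick sketch using the generator's classes array:

# Count training images per class (order follows class_indices)
counts = np.bincount(train_generator.classes)
for name, idx in sorted(labels.items(), key=lambda kv: kv[1]):
    print(name, counts[idx])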

# Grab one batch to check shapes: images (32, 300, 300, 3), one-hot labels (32, 6)
for image_batch, label_batch in train_generator:
    break
print(image_batch.shape, label_batch.shape)

# Persist the class names; flow_from_directory assigns indices in sorted order,
# so line i of labels.txt corresponds to model output index i
Labels = '\n'.join(sorted(train_generator.class_indices.keys()))

with open('labels.txt', 'w') as f:
    f.write(Labels)
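
At inference time (for example after loading trained_model.h5 in a separate script) the file can be read back; a minimal sketch:

with open('labels.txt') as f:
    class_names = f.read().splitlines()
print(class_names)  # class_names[i] is the class for model output index i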

Building the model

model=Sequential()
#Convolution blocks

model.add(Conv2D(32,(3,3), padding='same',input_shape=(300,300,3),activation='relu'))
model.add(MaxPooling2D(pool_size=2)) 
#model.add(SpatialDropout2D(0.5)) # hurt accuracy when enabled, so left out

model.add(Conv2D(64,(3,3), padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=2)) 
#model.add(SpatialDropout2D(0.5))

model.add(Conv2D(32,(3,3), padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=2)) 

#Classification layers
model.add(Flatten())

model.add(Dense(64,activation='relu'))
#model.add(SpatialDropout2D(0.5))
model.add(Dropout(0.2))
model.add(Dense(32,activation='relu'))

model.add(Dropout(0.2))
model.add(Dense(6,activation='softmax'))

filepath="trained_model.h5"
checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint1]

Model summary

model.summary()
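
The summary shows that almost all parameters sit in the first Dense layer: with padding='same', only the three pool_size=2 poolings shrink the 300x300 input, so Flatten emits a large vector. A quick check of the arithmetic:

# Feature-map side length after three 2x2 max-poolings
side = 300
for _ in range(3):
    side //= 2               # 300 -> 150 -> 75 -> 37
flat = side * side * 32      # 43808 values out of Flatten
print(flat, flat * 64 + 64)  # first Dense layer: 2,803,776 parameters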

Training the model

Compile the model with the categorical cross-entropy loss and the Adam optimizer:

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc']) # RMSprop was also tried and gave worse accuracy
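
For reference, categorical cross-entropy on a one-hot label reduces to the negative log of the probability assigned to the true class; a tiny numpy illustration with made-up numbers:

# loss = -sum(y_true * log(y_pred)) = -log(p_true_class)
y_true = np.array([0, 0, 1, 0, 0, 0])                  # one-hot: true class is index 2
y_pred = np.array([0.05, 0.05, 0.7, 0.1, 0.05, 0.05])  # hypothetical softmax output
loss = -np.sum(y_true * np.log(y_pred))
print(loss)  # -log(0.7), about 0.357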

#es=EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)

history = model.fit_generator(train_generator,
                              epochs=100,
                              steps_per_epoch=2276//32,
                              validation_data=test_generator,
                              validation_steps=251//32,
                              workers = 4,
                              callbacks=callbacks_list) 
# Observed validation accuracy: ~75% at epoch 41, ~76.9% at epoch 73, ~80% at epoch 78
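
After training, the best checkpoint can be re-scored on the validation subset. A minimal sketch (evaluate_generator matches the older Keras API used above; newer versions accept the generator directly in evaluate):

from keras.models import load_model

best = load_model('trained_model.h5')   # best checkpoint saved by ModelCheckpoint
scores = best.evaluate_generator(test_generator, steps=251//32)
print('val loss, val acc:', scores)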

Testing a prediction

from keras.preprocessing import image

img_path = './Garbage classification/Garbage classification/plastic/plastic75.jpg'

img = image.load_img(img_path, target_size=(300, 300))
img = image.img_to_array(img, dtype=np.uint8)
img=np.array(img)/255.0

plt.title("Loaded Image")
plt.axis('off')
plt.imshow(img.squeeze())

p = model.predict(img[np.newaxis, ...])

#print("Predicted shape", p.shape)
print("Maximum Probability: ", np.max(p[0], axis=-1))
predicted_class = idx_to_class[np.argmax(p[0], axis=-1)]  # map the index back to its class name
print("Classified:", predicted_class)


classes=[]
prob=[]
print("\n-------------------Individual Probability--------------------------------\n")

for i, j in enumerate(p[0]):
    print(idx_to_class[i].upper(), ':', round(j*100, 2), '%')
    classes.append(idx_to_class[i])
    prob.append(round(j*100, 2))
    
def plot_bar_x():
    # this is for plotting purpose
    index = np.arange(len(classes))
    plt.bar(index, prob)
    plt.xlabel('Labels', fontsize=12)
    plt.ylabel('Probability', fontsize=12)
    plt.xticks(index, classes, fontsize=12, rotation=20)
    plt.title('Probability for loaded image')
    plt.show()
plot_bar_x()

Accuracy and loss graphs

acc = history.history['acc']
val_acc = history.history['val_acc']

loss = history.history['loss']
val_loss = history.history['val_loss']

# ________________ Graph 1 -------------------------

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')

# ________________ Graph 2 -------------------------

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,max(plt.ylim())])
plt.title('Training and Validation Loss')
plt.show()

Improvements

Switch to the adaptive Nadam optimizer and add early stopping.

model.compile(loss='categorical_crossentropy',
              optimizer='Nadam',
              metrics=['acc'])

# Early stopping must be defined before training and passed in the callbacks
earlyStopping = EarlyStopping(monitor='acc', patience=20, verbose=1, mode='max')

history1 = model.fit(train_generator,
                     epochs=100,
                     steps_per_epoch=2276//32,
                     validation_data=test_generator,
                     validation_steps=251//32,
                     workers=4,
                     callbacks=[checkpoint1, earlyStopping])

acc = history1.history['acc']
val_acc = history1.history['val_acc']

loss = history1.history['loss']
val_loss = history1.history['val_loss']

# ________________ Graph 1 -------------------------

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')

# ________________ Graph 2 -------------------------

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,max(plt.ylim())])
plt.title('Training and Validation Loss')
plt.show()

Optional improvements (not guaranteed to help)

Converting the images to grayscale edge maps

from skimage import io, color, feature, morphology
import skimage.transform

char_list = os.listdir("./Garbage classification/Garbage classification/")
print(char_list)
x = np.ones((2527, 300, 300))   # 2527 images in the dataset
y = []
x_index = 0
for index in range(0, len(char_list)):
    image_list = os.listdir("./Garbage classification/Garbage classification/" + char_list[index])
    print(os.path.join("./Garbage classification/Garbage classification/", char_list[index]))
    for image in image_list:
        img = io.imread(os.path.join("./Garbage classification/Garbage classification/", char_list[index], image))
        reader = skimage.transform.resize(img, (300, 300))
        item_gray = color.rgb2gray(reader)             # RGB -> single channel
        item_gray = feature.canny(item_gray, sigma=3)  # Canny edge map
        item_gray = morphology.dilation(item_gray)     # thicken the edges
        x[x_index] = item_gray
        y.append(index)
        x_index = x_index + 1
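
To train on these arrays with the single-channel model defined further below, they still need a channel axis, one-hot labels, and a train/validation split. A minimal sketch, assuming scikit-learn is available:

from sklearn.model_selection import train_test_split
from keras.utils import to_categorical

x_gray = x.reshape(-1, 300, 300, 1).astype('float32')   # add the channel axis Conv2D expects
y_onehot = to_categorical(np.array(y), num_classes=6)
x_train, x_val, y_train, y_val = train_test_split(
    x_gray, y_onehot, test_size=0.1, stratify=np.array(y), random_state=42)
# model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=32, epochs=100)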

PCA jittering (PCA-based color augmentation)

import numpy as np
import os
import random
from PIL import Image
import imageio

dir_path = os.path.join('E:/MLPRO/Garbage classification/Garbage classification/paper')

def PCA_Jittering(path):
    img_list = os.listdir(path)
    img_num = len(img_list)

    for i in range(img_num):
        img_path = os.path.join(path, img_list[i])
        img = Image.open(img_path)

        img = np.asanyarray(img, dtype='float32')
        img = img / 255.0
        img_size = img.size // 3      # number of pixels (total values / 3 channels)
        img1 = img.reshape(img_size, 3)

        img1 = np.transpose(img1)                         # shape (3, num_pixels)
        img_cov = np.cov([img1[0], img1[1], img1[2]])     # 3x3 covariance of the RGB channels
        lamda, p = np.linalg.eig(img_cov)                 # its eigenvalues and eigenvectors

        # p holds the eigenvectors of the covariance matrix
        p = np.transpose(p)

        # Gaussian random scales; sigma=3 is aggressive for [0,1] images
        # (the AlexNet-style jitter used sigma=0.1)
        alpha1 = random.gauss(0, 3)
        alpha2 = random.gauss(0, 3)
        alpha3 = random.gauss(0, 3)

        # lamda holds the eigenvalues of the covariance matrix
        v = np.transpose((alpha1*lamda[0], alpha2*lamda[1], alpha3*lamda[2]))

        # per-channel offset along the principal components
        add_num = np.dot(p, v)

        # add the offset to each channel of the original image
        img2 = np.array([img[:,:,0]+add_num[0], img[:,:,1]+add_num[1], img[:,:,2]+add_num[2]])

        # img2 is (3, H, W); restore (H, W, 3) channel order before saving
        img2 = np.swapaxes(img2, 0, 2)
        img2 = np.swapaxes(img2, 0, 1)
        img2 = (np.clip(img2, 0., 1.) * 255).astype(np.uint8)  # clip: jitter can leave [0,1]
        save_name = 'paper' + str(i) + '.jpg'
        save_path = os.path.join('E:/MLPRO/Garbage classification/Garbage classification/new_paper', save_name)
        imageio.imsave(save_path, img2)

        #plt.imshow(img2)
        #plt.show()

PCA_Jittering(dir_path)
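
As written, PCA_Jittering only augments the paper folder and hardcodes its output names. A hypothetical loop over all six classes (folder names assumed; the save_name/save_path inside the function would need to be parameterised per class first):

classes = ['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash']  # assumed folder names
base = 'E:/MLPRO/Garbage classification/Garbage classification'
for c in classes:
    PCA_Jittering(os.path.join(base, c))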

model = Sequential()
# Convolution blocks (single-channel input to match the grayscale edge maps above)

model.add(Conv2D(32, (3,3), padding='same', input_shape=(300,300,1), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
#model.add(SpatialDropout2D(0.5)) # hurt accuracy when enabled, so left out

model.add(Conv2D(64,(3,3), padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=2)) 
#model.add(SpatialDropout2D(0.5))

model.add(Conv2D(32,(3,3), padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=2)) 

#Classification layers
model.add(Flatten())

model.add(Dense(64,activation='relu'))
#model.add(SpatialDropout2D(0.5))
model.add(Dropout(0.2))
model.add(Dense(32,activation='relu'))

model.add(Dropout(0.2))
model.add(Dense(6,activation='softmax'))

filepath = "trained_model.h5"
checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)  # es must be defined before use
callbacks_list = [checkpoint1, es]

