基于深度学习的玫瑰向日葵分类系统

1.环境配置

安装 Anaconda,启动 Anaconda,切换源:在命令行下输入如下指令切换到清华大学镜像,并确认使用该镜像。 conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ conda config --set show_channel_urls yes

使用 Anaconda 虚拟环境安装 TensorFlow:

1) 建立工作目录 在命令行下建立并切换到工作目录 md \pythonwork cd \pythonwork

2) 建立 TensorFlow Anaconda 虚拟环境 conda create --name tensorFlow python=3.6.4 anaconda 其中: conda create 是建立虚拟环境 --name tensorFlow 是建立虚拟环境的名字,也可以任意其他名字 python=3.6.4 是选用 python 的版本,命令行下敲入 python 可以查看版本

3) 启动 Anaconda 虚拟环境 建立 TensorFlow 的 Anaconda 虚拟环境后,就可以启动这个虚拟环境了。在命令提示 符窗口下输入下列命令: activate tensorFlow 其中 tensorFlow 是之前建立虚拟环境的名字。

4)安装tensorflow

pip install tensorflow

如果提示 pip 版本过低,使用下面指令更新 pip,否则无法正确安装 TensorFlow python -m pip install --upgrade pip

5) 安装 Keras 安装完 TensorFlow 后安装 Keras:pip install keras

6)启动jupyter

cd \pythonwork

activate tensorFlow

jupyter notebook

7)关闭 TensorFlow 的 Anaconda 虚拟环境:

deactivate

2.区分玫瑰向日葵

1)enhance 增强数据集

from PIL import Image, ImageEnhance
import os

def turn_left_rigth(img):
    """Return a horizontally mirrored (left/right flipped) copy of *img*."""
    flipped = img.transpose(Image.FLIP_LEFT_RIGHT)
    return flipped
    
def brighten_darken(img, val):
    """Adjust brightness: val > 1 brightens the image, val < 1 darkens it."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(val)

def saturation_up_down(img, val):
    """Adjust color saturation: val > 1 increases it, val < 1 decreases it."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(val)

def Contrast_up_down(img, val):
    """Adjust contrast: val > 1 increases it, val < 1 decreases it."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(val)

def Sharpness_up_down(img, val):
    """Adjust sharpness: val > 1 sharpens the image, val < 1 softens it."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(val)

def picture_enhance(dir):
    """Augment every image in *dir* in place.

    For each readable image, saves a left/right-flipped copy plus four
    variants (factors 1.2, 1.4, 0.8, 0.6) of contrast, sharpness,
    brightness and saturation, all as RGB JPEGs next to the originals.

    Files that cannot be opened as images are skipped (best-effort),
    instead of a bare ``except: pass`` swallowing every error.
    """
    # Enhancement factor -> filename suffix: 1.2 -> '_1', 1.4 -> '_2',
    # 0.8 -> '_1', 0.6 -> '_2' (same scheme as the original code).
    factors = ((1.2, '_1'), (1.4, '_2'), (0.8, '_1'), (0.6, '_2'))
    # (prefix when factor > 1, prefix when factor < 1, enhancement function)
    enhancers = (
        ('Contrast_up', 'Contrast_down', Contrast_up_down),
        ('Sharpness_up', 'Sharpness_down', Sharpness_up_down),
        ('brighten', 'darken', brighten_darken),
        ('saturation_up', 'saturation_down', saturation_up_down),
    )
    for i, name in enumerate(os.listdir(dir)):
        try:
            # PIL raises OSError (UnidentifiedImageError) for non-images.
            img = Image.open(os.path.join(dir, name))
        except OSError:
            continue
        temp = str(i)
        try:
            turn_left_rigth(img).convert('RGB').save(
                os.path.join(dir, 'turn_left_rigth' + temp + '.jpg'))
            for up_prefix, down_prefix, enhance in enhancers:
                for val, suffix in factors:
                    prefix = up_prefix if val > 1 else down_prefix
                    enhance(img, val).convert('RGB').save(
                        os.path.join(dir, prefix + temp + suffix + '.jpg'))
        except OSError:
            # Best-effort: keep augmenting the remaining files even if
            # one image fails to save.
            pass

# Augment both class folders in place: folder 0 = roses, folder 1 =
# sunflowers (labels match the argmax mapping used at prediction time).
picture_enhance('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\0')
picture_enhance('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\1')

2)make list 制作list.txt

import os

def generate_list(dir, label):
    """Write ``<dir>/list.txt`` where each line is '<file-path> <label>'.

    Args:
        dir: folder containing the image files.
        label: integer class label appended after each path.
    """
    # Sort for a deterministic line order, and skip any list.txt left
    # over from a previous run so the list never includes itself.
    files = sorted(f for f in os.listdir(dir) if f != 'list.txt')
    # os.path.join instead of a hard-coded '\\' so the paths are also
    # valid on non-Windows systems.
    with open(os.path.join(dir, 'list.txt'), 'w') as list_file:
        for file in files:
            list_file.write(os.path.join(dir, file) + ' ' + str(int(label)) + '\n')

# Two arguments: arg1 = class folder (roses / sunflowers), arg2 = label.
generate_list('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\0', 0)
generate_list('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\1', 1)

# Read each per-folder list.txt, merge them into one list.txt in the
# parent directory, then remove the per-folder lists.
with open('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\0\\list.txt', 'r') as file0:
    roses_list = file0.readlines()

with open('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\1\\list.txt', 'r') as file1:
    sunflowers_list = file1.readlines()

with open('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\list.txt', 'w') as file:
    file.writelines(roses_list)
    file.writelines(sunflowers_list)

os.remove('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\0\\list.txt')
os.remove('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\1\\list.txt')

3)制作数据集

import os
import numpy as np
from PIL import Image

def readData(txt_path):
    """Load the images and labels listed in *txt_path*.

    Each line of the list file is '<image-path> <label>'. Every image is
    converted to RGB and resized to 64x64.

    Returns:
        (image_np_array, label_np_array): images as an (N, 64, 64, 3)
        array and labels as an array of label *strings* -- downstream
        code compares them against '0' / '1', so they are deliberately
        not converted to ints here.
    """
    print('Loading images........')
    with open(txt_path, 'r') as list_file:
        content = list_file.readlines()
    image = []
    label = []
    for i, line in enumerate(content):
        print(i)
        parts = line.split()  # parts[0] = path, parts[1] = label
        try:
            im = Image.open(parts[0])
            # ANTIALIAS filter keeps quality while shrinking the image.
            im = im.convert('RGB').resize((64, 64), Image.ANTIALIAS)
        except (OSError, IndexError):
            # Skip unreadable files / malformed lines instead of a bare
            # except that would hide every error.
            continue
        image.append(np.array(im))
        label.append(parts[1])
    return (np.array(image), np.array(label))

# Load every image/label pair from the merged list file.
(data_image, data_label) = readData('C:\\pythonwork\\Flower\\Flower_Orig_Pic\\list.txt')

# Save the final dataset as a single .npz archive (keys: train_image, train_label).
np.savez('Rose_Sunflower_Data_64.npz', train_image = data_image, train_label = data_label)

4)model train 训练模型

import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils

# Load the .npz dataset produced by the data-preparation step.
dataset = np.load('Rose_Sunflower_Data_64.npz')
image = dataset['train_image']
label = dataset['train_label']

print('dataset_number: ', len(image))

# Per class: 9000 images -> 8000 train (6400 train / 1600 valid) + 1000 test.
train_image_0 = []
train_label_0 = []
train_image_1 = []
train_label_1 = []
test_image = []
test_label = []
for img, lab in zip(image, label):
    # Labels were saved as strings, hence the '0' / '1' comparisons.
    if lab == '0' and len(train_label_0) < 8000:
        # First 8000 class-0 samples form the class-0 training pool.
        train_image_0.append(img)
        train_label_0.append(lab)
    elif lab == '1' and len(train_label_1) < 8000:
        # First 8000 class-1 samples form the class-1 training pool.
        train_image_1.append(img)
        train_label_1.append(lab)
    else:
        # Everything left over becomes test data (1000 + 1000 = 2000).
        test_image.append(img)
        test_label.append(lab)

train_image_0 = np.array(train_image_0)
train_label_0 = np.array(train_label_0)

train_image_1 = np.array(train_image_1)
train_label_1 = np.array(train_label_1)

test_image = np.array(test_image)
test_label = np.array(test_label)

print(train_image_0.shape, train_label_0.shape)

# Split each training pool 80/20 into train / validation sets.
train_image = np.vstack((train_image_0[:6400], train_image_1[:6400]))
valid_image = np.vstack((train_image_0[6400:], train_image_1[6400:]))
print(train_image.shape)
print(valid_image.shape)

train_label = np.concatenate((train_label_0[:6400], train_label_1[:6400]))
valid_label = np.concatenate((train_label_0[6400:], train_label_1[6400:]))
print(train_label.shape)
print(valid_label.shape)

def show_image(img):
    """Render a single image in a matplotlib window."""
    plt.imshow(img)
    plt.show()

# Quick visual sanity check of the first training sample.
show_image(train_image[0])

# Pre-processing: scale pixel values into [0, 1] and one-hot encode labels.
def _prep(images, labels):
    """Return (normalized images, one-hot labels) for one data split."""
    return images.astype(float) / 255, np_utils.to_categorical(labels)

train_image_normalize, train_label_onehotencoding = _prep(train_image, train_label)
valid_image_normalize, valid_label_onehotencoding = _prep(valid_image, valid_label)
test_image_normalize, test_label_onehotencoding = _prep(test_image, test_label)

# CNN: two conv/pool stages, then a small fully connected head with
# dropout, ending in a 2-way softmax (rose vs. sunflower).
model = Sequential([
    Conv2D(filters=32, kernel_size=(3, 3), padding='same',
           input_shape=(64, 64, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(filters=16, kernel_size=(3, 3), padding='same',
           activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.5),
    Flatten(),
    Dense(units=100, kernel_initializer='normal', activation='relu'),
    Dropout(0.25),
    Dense(units=2, kernel_initializer='normal', activation='softmax'),
])

print(model.summary())

model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# shuffle=True randomizes the sample order each epoch so a single class
# is not over-represented within consecutive batches.
train_history = model.fit(train_image_normalize, train_label_onehotencoding,
                          validation_data=(valid_image_normalize,
                                           valid_label_onehotencoding),
                          shuffle=True, epochs=20, batch_size=200,
                          verbose=2)

def show_train_history(train_history, train, validation):
    """Plot one training metric next to its validation counterpart."""
    history = train_history.history
    plt.plot(history[train])
    plt.plot(history[validation])
    plt.title('Train History')
    plt.xlabel('Epoch')
    plt.ylabel(train)
    plt.legend(['train', 'validation'])
    plt.show()

# Accuracy and loss curves, train vs. validation.
show_train_history(train_history, 'accuracy', 'val_accuracy')
show_train_history(train_history, 'loss', 'val_loss')

# Final evaluation on the held-out test set; scores = [loss, accuracy].
scores = model.evaluate(test_image_normalize, test_label_onehotencoding)
print(scores)

# Persist the trained model for the prediction step.
model.save('Rose_Sunflower_Model.h5')

5)predict 预测

from keras.models import load_model
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np

my_model = load_model('Rose_Sunflower_Model.h5')

def show_image(img):
    """Render a single image in a matplotlib window."""
    plt.imshow(img)
    plt.show()

img = Image.open('p4.jpg')

# Convert to RGB first, matching the training pipeline: without it a
# grayscale or RGBA input yields 1 or 4 channels from getdata() and the
# (1, 64, 64, 3) reshape below raises a ValueError.
img = img.convert('RGB').resize((64, 64), Image.ANTIALIAS)

show_image(img)

# Flatten the pixels, then reshape into the (batch, H, W, C) tensor the
# model expects and normalize to [0, 1] like the training data.
number_data = img.getdata()
number_data_array = np.array(number_data)
number_data_array = number_data_array.reshape(1, 64, 64, 3).astype(float)
number_data_array_normalize = number_data_array / 255
prediction = my_model.predict(number_data_array_normalize)
print(prediction)

# Class 0 = rose, class 1 = sunflower (same labels as the list files).
index = np.argmax(prediction)
if index == 0:
    print('玫瑰')
elif index == 1:
    print('向日葵')

 

 

 

 

 

 

 

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值