MOOC TensorFlow Introductory Hands-On Course 2: Image Classification

Image Classification

Command to install google.colab support: pip install colab_ssh --upgrade

Cats vs. Dogs Classification


import os
import zipfile
import random
import numpy as np
import tensorflow as tf
import shutil
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from shutil import copyfile
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from google.colab import files
from tensorflow.keras.preprocessing import image


# Split the dataset
# SOURCE      source data directory
# TRAINING    training-set path
# TESTING     test-set path
# SPLIT_SIZE  split ratio

def split_data(SOURCE,TRAINING,TESTING,SPLIT_SIZE):
    files=[]
    for filename in os.listdir(SOURCE):
        file=SOURCE+filename
        if os.path.getsize(file)>0:
            files.append(filename)
        else:
            print(filename+" is zero length, so ignoring.")
    training_length=int(len(files)*SPLIT_SIZE)  # training-set size is the total times the split ratio
    testing_length=int(len(files)-training_length)  # the remainder is the test set
    shuffled_set=random.sample(files,len(files))  # shuffle the file list
    training_set = shuffled_set[0:training_length]
    testing_set = shuffled_set[-testing_length:]

    # copy the files into the training directory
    for filename in training_set:
        this_file = SOURCE+filename
        destination = TRAINING+filename
        copyfile(this_file, destination)
    # copy the files into the test directory
    for filename in testing_set:
        this_file = SOURCE+filename
        destination=TESTING+filename
        copyfile(this_file, destination)

CAT_SOURCE_DIR = "/tmp/PetImages/Cat/"
TRAINING_CATS_DIR="/tmp/cats-v-dogs/training/cats/"
TESTING_CATS_DIR="/tmp/cats-v-dogs/testing/cats/"
DOG_SOURCE_DIR="/tmp/PetImages/Dog/"
TRAINING_DOGS_DIR="/tmp/cats-v-dogs/training/dogs/"
TESTING_DOGS_DIR="/tmp/cats-v-dogs/testing/dogs/"

# Create folders for the dataset
def create_dir(file_dir):
    if os.path.exists(file_dir):
        print("true")
        shutil.rmtree(file_dir)  #删除再建立
        os.makedirs(file_dir)
    else:
        os.makedirs(file_dir)

# Create the folders
create_dir(TRAINING_CATS_DIR)
create_dir(TESTING_CATS_DIR)
create_dir(TRAINING_DOGS_DIR)
create_dir(TESTING_DOGS_DIR)

split_size=0.9
split_data(CAT_SOURCE_DIR,TRAINING_CATS_DIR,TESTING_CATS_DIR,split_size)
split_data(DOG_SOURCE_DIR,TRAINING_DOGS_DIR,TESTING_DOGS_DIR,split_size)

# Define the neural network
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16,(3,3),activation='relu',input_shape=(150,150,3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512,activation='relu'),
    tf.keras.layers.Dense(1,activation='sigmoid')
])
model.compile(optimizer=RMSprop(lr=0.001),loss='binary_crossentropy',metrics=['acc'])

# Data preprocessing
TRAINING_DIR="/tmp/cats-v-dogs/training/"  # parent folder populated by split_data above
train_datagen=ImageDataGenerator(rescale=1.0/255.0)  # normalize pixel values to [0,1]
#  target_size=(150,150)  resize every image to 150x150
#  batch_size  the total image count divided by batch_size gives the number of batches per epoch; e.g. with 22500 images, 22500/100 = 225 batches
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,batch_size=100,class_mode='binary',target_size=(150,150))
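# (My addition) quick sanity check on the generator: its label mapping and the
# number of batches per epoch, which should match the 22500/100 = 225 estimate.
print(train_generator.class_indices)  # e.g. {'cats': 0, 'dogs': 1}
print(len(train_generator))           # batches per epoch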

VALIDATION_DIR="/tmp/cats-v-dogs/testing/"  # parent folder populated by split_data above
validation_datagen=ImageDataGenerator(rescale=1.0/255.0)
validation_generator=validation_datagen.flow_from_directory(VALIDATION_DIR,batch_size=100,class_mode='binary',target_size=(150,150))
#  epochs  number of full passes over the training set
history = model.fit_generator(
    train_generator,
    epochs=200,
    verbose=1,
    validation_data=validation_generator
)

# Plot the training results
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']

epochs=range(len(acc))

plt.plot(epochs,acc,'r',label='Training accuracy')
plt.plot(epochs,val_acc,'b',label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()

plt.plot(epochs,loss,'r',label='Training loss')
plt.plot(epochs,val_loss,'b',label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

# Prediction
uploaded = files.upload()
for fn in uploaded.keys():
    #predicting images
    path = '/content/'+fn
    img=image.load_img(path,target_size=(150,150))
    x = image.img_to_array(img)
    x = np.expand_dims(x,axis=0)  # add a batch dimension: (150,150,3) -> (1,150,150,3)
    images=np.vstack([x])  # stack into a single batch array
    classes = model.predict(images,batch_size=10)
    print(classes[0])
    if classes[0]>0.5:
        print(fn+" is a dog")
    else:
        print(fn+" is a cat")
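The expand_dims call adds a batch dimension rather than flattening the image; a quick shape check (my addition, on synthetic data) makes the comment above concrete:

demo = np.zeros((150, 150, 3))           # one fake RGB image
batched = np.expand_dims(demo, axis=0)   # (150, 150, 3) -> (1, 150, 150, 3)
print(batched.shape)
print(np.vstack([batched]).shape)        # unchanged here: only one array to stack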

Transfer Learning
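Although this section is headed transfer learning, the walkthrough below trains a small CNN from scratch. For reference, a minimal transfer-learning sketch in Keras (my addition, not part of the course code; the head's layer sizes are illustrative) loads a pretrained InceptionV3 base, freezes its weights, and trains only a new classification head:

import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers, Model
from tensorflow.keras.optimizers import RMSprop

base = InceptionV3(input_shape=(300, 300, 3), include_top=False, weights='imagenet')
for layer in base.layers:
    layer.trainable = False   # freeze the pretrained convolutional base

x = layers.Flatten()(base.output)
x = layers.Dense(1024, activation='relu')(x)
output = layers.Dense(1, activation='sigmoid')(x)   # binary: horse vs. human
transfer_model = Model(base.input, output)
transfer_model.compile(optimizer=RMSprop(lr=0.0001),
                       loss='binary_crossentropy', metrics=['acc'])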

Horses vs. Humans Classification Example

import tensorflow as tf
import os
import zipfile
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
from google.colab import files
from tensorflow.keras.preprocessing import image

# Unzip the dataset
local_zip='/home/jupyter-zhaoweidong/horse-or-human.zip'
zip_ref=zipfile.ZipFile(local_zip,'r')
zip_ref.extractall('/home/jupyter-zhaoweidong/horse-or-human')
zip_ref.close()
# Dataset paths
train_horse_dir=os.path.join('/home/jupyter-zhaoweidong/horse-or-human/horses')
train_human_dir = os.path.join('/home/jupyter-zhaoweidong/horse-or-human/humans')

# Print the first 10 file names
train_horse_names=os.listdir(train_horse_dir)
print(train_horse_names[:10])
train_human_names=os.listdir(train_human_dir)
print(train_human_names[:10])
# Print the dataset sizes
print('total training horse images:',len(os.listdir(train_horse_dir)))
print('total training human images:',len(os.listdir(train_human_dir)))
# Display the first 8 horse and first 8 human images
nrows=4
ncols=4
pic_index=0

fig=plt.gcf()
fig.set_size_inches(ncols*4,nrows*4)
pic_index+=8
next_horse_pix = [os.path.join(train_horse_dir,fname)
                  for fname in train_horse_names[pic_index-8:pic_index]]
next_human_pix=[os.path.join(train_human_dir,fname)
                for fname in train_human_names[pic_index-8:pic_index]]
for i,img_path in enumerate(next_horse_pix+next_human_pix):
    sp=plt.subplot(nrows,ncols,i+1)
    sp.axis('off')
    img=mpimg.imread(img_path)
    plt.imshow(img)
plt.show()
# Build the model
model=tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16,(3,3),activation='relu',input_shape=(300,300,3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512,activation='relu'),
    tf.keras.layers.Dense(1,activation='sigmoid')
])
# Print the model architecture (summary() prints directly and returns None)
model.summary()
# Define the loss function and learning rate
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics=['acc'])

# Normalize pixel values
train_datagen=ImageDataGenerator(rescale=1/255)
# Preprocess the images
train_generator=train_datagen.flow_from_directory(
    '/home/jupyter-zhaoweidong/horse-or-human/',
    target_size=(300,300),
    batch_size=128,
    class_mode='binary'
)

# Train
history=model.fit_generator(
    train_generator,
    steps_per_epoch=8,   # 1027 images total with batch_size=128, so 8 full batches per epoch
    epochs=2,
    verbose=1
)
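# (My addition) sanity-check the steps_per_epoch value used above:
total_images = len(train_horse_names) + len(train_human_names)
print(total_images // 128)  # integer division: 1027 // 128 = 8 full batches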
# Prediction
uploaded=files.upload()
for fn in uploaded.keys():
    path='/content/'+fn
    img=image.load_img(path,target_size=(300,300))  # resize the image to 300x300
    x = image.img_to_array(img)    # convert the image to an array
    x=np.expand_dims(x,axis=0)     # add a batch dimension

    images=np.vstack([x])         # stack into a single batch array
    classes = model.predict(images,batch_size=10)
    print(classes[0])
    if classes[0]>0.5:
        print(fn+" is a human")
    else:
        print(fn+" is a horse")


Handwritten Character Recognition

Sample data format

import csv
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from google.colab import files
import matplotlib.pyplot as plt

uploaded = files.upload()

def get_data(filename):
    with open(filename) as training_file:
        csv_reader = csv.reader(training_file,delimiter=',') # fields are comma-separated
        first_line = True
        temp_images = []
        temp_labels = []
        for row in csv_reader:
            if first_line:
                first_line=False  # skip the header row
            else:
                temp_labels.append(row[0])  # the first column is the label
                image_data=row[1:785]   # the remaining 784 columns are pixel values
                image_data_as_array = np.array_split(image_data,28)  # reshape the row into 28x28
                temp_images.append(image_data_as_array)  # store as a 2-D image
        images = np.array(temp_images).astype('float')   # cast to float
        labels= np.array(temp_labels).astype('float')   # cast to float
    return images,labels
training_images,training_labels=get_data('sign_mnist_train.csv')
testing_images,testing_labels = get_data('sign_mnist_test.csv')

print(training_images.shape)
print(training_labels.shape)
print(testing_images.shape)
print(testing_labels.shape)
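# (My addition) np.array_split in get_data cuts each 784-value row into 28 rows
# of 28 pixels, equivalent to a reshape; a quick check on synthetic data:
row = np.arange(784)
print(np.array(np.array_split(row, 28)).shape)  # (28, 28)
print(row.reshape(28, 28).shape)                # (28, 28), same layout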

# Add a channel dimension: (N,28,28) -> (N,28,28,1)
training_images = np.expand_dims(training_images,axis=3)
testing_images = np.expand_dims(testing_images,axis=3)
# Training-set image preprocessing
train_datagen=ImageDataGenerator(
    rescale=1./255,   # normalization
    # data augmentation
    rotation_range=40,      # random rotation range in degrees
    width_shift_range=0.2,  # horizontal shift up to 20%
    height_shift_range=0.2, # vertical shift up to 20%
    shear_range=0.2,        # shear up to 20%
    zoom_range=0.2,         # zoom up to 20%
    horizontal_flip=True,   # random horizontal flips
    fill_mode='nearest'  # fill pixels exposed by the transforms with nearest-neighbor values
)
# Test-set image preprocessing (rescale only)
validation_datagen = ImageDataGenerator(
    rescale=1./255
)
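# (My addition) to eyeball the augmentation settings above, pull one augmented
# batch from the training generator and plot a few samples:
preview = train_datagen.flow(training_images[:9], training_labels[:9], batch_size=9)
batch_x, batch_y = next(preview)
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(batch_x[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
plt.show()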
# Build a LeNet-style network
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64,(3,3),activation='relu',input_shape=(28,28,1)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128,activation=tf.nn.relu),
    tf.keras.layers.Dense(26,activation=tf.nn.softmax)]  # 26 output classes, one per letter
)

model.compile(optimizer = tf.keras.optimizers.Adam(),  # Adam adapts the learning rate
              loss = 'sparse_categorical_crossentropy',  # sparse cross-entropy for integer labels
              metrics=['accuracy'])  # track accuracy
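# (My addition) the sparse loss expects integer labels (0..25 here); with
# one-hot labels you would use 'categorical_crossentropy' instead, e.g.:
print(tf.keras.utils.to_categorical(3, num_classes=26))  # a 26-long one-hot vector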
history = model.fit_generator(
    train_datagen.flow(training_images,training_labels,batch_size=32),  # 32 samples per batch
    steps_per_epoch=len(training_images)//32,  # number of batches per epoch
    epochs=15,
    validation_data=validation_datagen.flow(testing_images,testing_labels,batch_size=32),
    validation_steps=len(testing_images)//32
)
model.evaluate(testing_images,testing_labels)
# Plot the accuracy and loss curves
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs,acc,'r',label='Training accuracy')
plt.plot(epochs,val_acc,'b',label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()

plt.plot(epochs,loss,'r',label='Training Loss')
plt.plot(epochs,val_loss,'b',label='Validation Loss')
plt.title('Training and Validation loss')
plt.legend()

plt.show()


Multi-class Image Classification

Rock-Paper-Scissors Gesture Recognition

import os
import zipfile
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
import numpy as np
from google.colab import files

local_zip = '/tmp/rps.zip'
zip_ref = zipfile.ZipFile(local_zip,'r')
zip_ref.extractall('/tmp/')
zip_ref.close()
local_zip = '/tmp/rps-test-set.zip'
zip_ref = zipfile.ZipFile(local_zip,'r')
zip_ref.extractall('/tmp/')
zip_ref.close()

rock_dir = os.path.join('/tmp/rps/rock')
paper_dir = os.path.join('/tmp/rps/paper')
scissors_dir = os.path.join('/tmp/rps/scissors')

print('total training rock images:',len(os.listdir(rock_dir)))
print('total training paper images:',len(os.listdir(paper_dir)))
print('total training scissors images:',len(os.listdir(scissors_dir)))

rock_files = os.listdir(rock_dir)
print(rock_files[:10])

paper_files = os.listdir(paper_dir)
print(paper_files[:10])

scissors_files = os.listdir(scissors_dir)
print(scissors_files[:10])

pic_index = 2
next_rock = [
    os.path.join(rock_dir,fname)
        for fname in rock_files[pic_index-2:pic_index]
]
next_paper = [os.path.join(paper_dir,fname)
                for fname in paper_files[pic_index-2:pic_index]]
next_scissors = [os.path.join(scissors_dir,fname)
                    for fname in scissors_files[pic_index-2:pic_index]]

for i,img_path in enumerate(next_rock+next_paper+next_scissors):
    img=mpimg.imread(img_path)
    plt.imshow(img)
    plt.axis('off')
    plt.show()

# Image preprocessing
TRAINING_DIR='/tmp/rps'
training_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)
VALIDATION_DIR='/tmp/rps-test-set/'
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(150,150),  # resize images to 150x150
    class_mode='categorical'
)
validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(150,150),
    class_mode='categorical'
)
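# (My addition) with class_mode='categorical' the softmax output order follows
# this alphabetical label mapping; print it so predictions can be decoded later:
print(train_generator.class_indices)  # e.g. {'paper': 0, 'rock': 1, 'scissors': 2}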
model=tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64,(3,3),activation='relu',input_shape=(150,150,3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(512,activation='relu'),
    tf.keras.layers.Dense(3,activation='softmax')
])
model.summary()

model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
history=model.fit_generator(train_generator,epochs=25,validation_data=validation_generator,verbose=1)
model.save("rps.h5")
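# (My addition) the saved HDF5 file can be restored in a later session with:
restored = tf.keras.models.load_model('rps.h5')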

acc = history.history['accuracy']
val_acc=history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs=range(len(acc))

plt.plot(epochs,acc,'r',label='Training accuracy')
plt.plot(epochs,val_acc,'b',label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.legend(loc=0)
plt.show()



# Predict on uploaded images
uploaded = files.upload()
for fn in uploaded.keys():
    path=fn
    img = image.load_img(path,target_size=(150,150))
    x = image.img_to_array(img)
    x = np.expand_dims(x,axis=0)

    images = np.vstack([x])
    classes=model.predict(images,batch_size=10)
    print(fn)
    print(classes)
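    # (My addition) decode the (1, 3) softmax vector: np.argmax picks the winning
    # index, and class order follows the alphabetical class_indices mapping above.
    label_names = ['paper', 'rock', 'scissors']
    print(label_names[np.argmax(classes[0])])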
