深度学习从数据集收集到测试2

1、训练

# -*- coding: utf-8 -*-
"""
Created on Fri Aug 10 23:43:05 2018

@author: Yanlei

Training script for the image-classification CNN: builds the train/validation
input pipelines from `input_data`, defines loss/optimizer/accuracy ops from
`model`, runs MAX_STEP optimization steps, periodically logs summaries for
TensorBoard, and saves a final checkpoint.
"""

#======================================================================
# Imports
import os
import numpy as np
import tensorflow as tf
import input_data
import model


# Configuration / hyper-parameters
N_CLASSES = 4           # number of output classes (Car, Cat, Dog, Face)
IMG_W = 64              # resize width; larger images make training slower
IMG_H = 64              # resize height
BATCH_SIZE = 50         # samples per batch
CAPACITY = 200          # maximum capacity of the input queue
MAX_STEP = 200          # total number of training steps
learning_rate = 0.0001  # usually kept <= 0.0001

# Data locations
train_dir = 'D:/opencv image/1'       # training-sample input path
logs_train_dir = 'D:/opencv image/1'  # logs / checkpoint output path

# Split the file list into training and validation sets (30% validation).
train, train_label, val, val_label = input_data.get_files(train_dir, 0.3)
# Training batches and labels
train_batch, train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
# Validation batches and labels
val_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

# Training ops
train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model.losses(train_logits, train_label_batch)
train_op = model.trainning(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)

# Validation ops
test_logits = model.inference(val_batch, BATCH_SIZE, N_CLASSES)
test_loss = model.losses(test_logits, val_label_batch)
test_acc = model.evaluation(test_logits, val_label_batch)

# Merge all summary ops into one for TensorBoard logging.
summary_op = tf.summary.merge_all()

# Session, log writer, and saver for persisting the trained model.
sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

# Initialize all variables.
sess.run(tf.global_variables_initializer())

# Start the queue-runner threads that feed the input pipeline.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

# Batch-wise training loop: one batch per step.
try:
    for step in np.arange(MAX_STEP):
        if coord.should_stop():
            break
        # train_logits is evaluated implicitly: train_loss/train_acc depend on it.
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

        # Every 10 steps: print current loss/accuracy and record a summary.
        if step % 10 == 0:
            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)

        # Save the trained model once at the final step.
        if (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
            print('Finish!')  # BUGFIX: was misspelled 'Finsh!'

except tf.errors.OutOfRangeError:
    print('Done training -- epoch limit reached')

finally:
    coord.request_stop()
    # BUGFIX: wait for the queue threads to terminate and release resources;
    # the original left threads running and the session/writer open.
    coord.join(threads)
    train_writer.close()
    sess.close()

2、测试

# -*- coding: utf-8 -*-
"""
下面我们就开始测试网络,验证网络的训练效果;
本次测试为随机的单图片测试,即随机的从训练集或测试集中读取一张图片,
送入到神经网络中进行识别,打印识别率及识别的图像。
"""
#=============================================================================
from PIL import Image
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import model
from input_data import get_files
 
#=======================================================================
#获取一张图片
def get_one_image(train):
    """Pick one random image from a list of paths, display it, and return it.

    Args:
        train: list of image file paths (training or validation set).

    Returns:
        numpy array of shape (64, 64, 3) — the image resized to 64x64 RGB.

    Raises:
        ValueError: if `train` is empty.
    """
    n = len(train)
    if n == 0:
        # BUGFIX: fail with a clear message instead of an opaque
        # np.random.randint error on an empty list.
        raise ValueError('get_one_image: empty image list')
    ind = np.random.randint(0, n)
    img_dir = train[ind]  # randomly chosen image for testing
    img = Image.open(img_dir)
    plt.imshow(img)
    plt.show()  # required, otherwise the figure is not displayed
    # BUGFIX: force RGB so the returned array is always (64, 64, 3);
    # grayscale or RGBA files would otherwise break the hard-coded
    # reshape to [1, 64, 64, 3] in evaluate_one_image.
    imag = img.convert('RGB').resize([64, 64])
    image = np.array(imag)
    return image
 
#--------------------------------------------------------------------
#测试图片
def evaluate_one_image(image_array):
    """Classify a single 64x64x3 image with the trained CNN.

    Restores the latest checkpoint from `logs_train_dir`, runs the network
    on `image_array`, and prints the predicted class (Car/Cat/Dog/Face)
    with its softmax probability.

    Args:
        image_array: numpy array of shape (64, 64, 3).
    """
    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 4

        # BUGFIX: build the graph on the placeholder so the value supplied
        # via feed_dict actually reaches the network. The original built
        # the graph directly on `image_array` and fed an unused placeholder,
        # so the feed was silently ignored.
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 64, 64, 3])

        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # you need to change the directories to yours.
        logs_train_dir = 'D:/opencv image/1/'

        saver = tf.train.Saver()

        with tf.Session() as sess:

            # Restore the latest trained model from the checkpoint directory.
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                # BUGFIX: bail out — without a checkpoint the variables are
                # uninitialized and running the graph would crash or
                # produce garbage.
                print('No checkpoint file found')
                return

            prediction = sess.run(logit, feed_dict={x: image_array})

            print('========================')
            print('prediction', prediction)
            print('============================')

            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a Car with possibility %.6f' %prediction[:, 0])
            elif max_index == 1:
                print('This is a Cat with possibility %.6f' %prediction[:, 1])
            elif max_index == 2:
                print('This is a Dog with possibility %.6f' %prediction[:, 2])
            else:
                print('This is a Face with possibility %.6f' %prediction[:, 3])
 
#------------------------------------------------------------------------
               
if __name__ == '__main__':
    # Smoke-test the trained network on one randomly chosen image.
    train_dir = 'D:/opencv image/1'

    # Pass `train` instead of `val` to get_one_image to probe the
    # training split rather than the validation split.
    train, train_label, val, val_label = get_files(train_dir, 0.3)
    img = get_one_image(val)
    evaluate_one_image(img)
    print(img.shape)
#===========================================================================
©️2020 CSDN 皮肤主题: 像素格子 设计师:CSDN官方博客 返回首页