TensorFlow的学习之路--猫狗识别

42人阅读 评论(0) 收藏 举报
分类:

训练网络:

# -*- coding: utf-8 -*-
import os  
import numpy as np  
import tensorflow as tf        
from parse_recorder_file import get_data
from AlexNet import AlexNet 
#from vgg16 import vgg_16
N_CLASSES = 2  # two output neurons: [1,0] or [0,1] — probability of cat vs. dog
IMG_W = 128  # images are resized to this width; larger images slow training
IMG_H = 128  
BATCH_SIZE = 32  # samples per training batch
MAX_STEP = 6 # number of training steps; NOTE(review): original comment says this should be >= 10000, but it is set to 6 here (likely a debug value)
keep_prob=0.5 
def run_training():
    """Train the AlexNet cat/dog classifier on a TFRecord input pipeline.

    Builds the graph (logits, softmax cross-entropy loss, Adam optimizer,
    top-1 accuracy), resumes from the newest checkpoint in the model
    directory when one exists (otherwise initializes variables), and runs
    MAX_STEP training steps, writing TensorBoard summaries every step and
    saving a checkpoint every 5 steps and at the final step.
    """
    logs_train_dir = 'E:/Python/tensorflow/train_log/'
    data_train_dir = 'E:/Python/tensorflow/model/'

    train_batch, train_label_batch = get_data('./train.tfrecords', IMG_W, True)
    train_logits = AlexNet(train_batch, keep_prob, N_CLASSES)

    with tf.name_scope('Loss'):
        # sparse_* variant takes integer class labels, not one-hot vectors.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=train_logits, labels=train_label_batch)
        train_loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar('loss', train_loss)
    with tf.name_scope('train_op'):
        train_op = tf.train.AdamOptimizer(learning_rate=0.00005).minimize(train_loss)
    correct = tf.nn.in_top_k(train_logits, train_label_batch, 1)
    correct = tf.cast(correct, tf.float16)
    with tf.name_scope('accuracy'):
        train_acc = tf.reduce_mean(correct, name='acc')
        tf.summary.scalar('acc', train_acc)

    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)

    # Resume from the newest checkpoint when one exists; otherwise start fresh.
    ckpt = tf.train.get_checkpoint_state(data_train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        print(ckpt.model_checkpoint_path)
        # Bug fix: restore the checkpoint TensorFlow reports as most recent
        # instead of the hard-coded 'model.ckpt-5' file, which breaks as soon
        # as training progresses past step 5.
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    # Bug fix: the TFRecord input pipeline is queue-based (TF1 queue API);
    # without starting the queue runners, sess.run() on the batch tensors
    # blocks forever.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(MAX_STEP):
            # Bug fix: run train op, metrics and summaries in ONE sess.run
            # call so they all see the same input batch. The original code
            # ran three separate sess.run calls per step, each of which
            # dequeued (and partly discarded) a fresh batch, so the printed
            # loss/accuracy and the summaries described different data.
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, train_loss, train_acc, summary_op])

            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))
            train_writer.add_summary(summary_str, step)
            if step % 5 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(data_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # Stop and join the input threads before closing the session.
        coord.request_stop()
        coord.join(threads)
        sess.close()
run_training()
 

Alexnet框架:

import tensorflow as tf
import numpy as np

def AlexNet(X, KEEP_PROB, NUM_CLASSES):
    """Build an AlexNet-style graph and return unscaled class logits.

    Args:
        X: input image batch tensor; the flatten below hard-codes
           16*16*512, i.e. it assumes 128x128 inputs reduced by three
           2x2 poolings (128 -> 64 -> 32 -> 16).
        KEEP_PROB: dropout keep probability for the fully connected layers.
        NUM_CLASSES: number of output classes (width of the final layer).

    Returns:
        Logits tensor of shape [batch, NUM_CLASSES] (no softmax applied).
    """
    # Block 1: conv -> LRN -> 2x2 max pool   (-> 64x64x64)
    net = conv(X, [5, 5, 3, 64], [64], 1, 1, name='conv1')
    net = lrn(net, 2, 1e-05, 0.75, name='norm1')
    net = max_pool(net, 2, 2, 2, 2, name='pool1')

    # Block 2: conv -> LRN -> 2x2 max pool   (-> 32x32x128)
    net = conv(net, [5, 5, 64, 128], [128], 1, 1, name='conv2')
    net = lrn(net, 2, 1e-05, 0.75, name='norm2')
    net = max_pool(net, 2, 2, 2, 2, name='pool2')

    # Blocks 3-5: three stacked convs, then a final 2x2 max pool (-> 16x16x512)
    net = conv(net, [3, 3, 128, 256], [256], 1, 1, name='conv3')
    net = conv(net, [3, 3, 256, 512], [512], 1, 1, name='conv4')
    net = conv(net, [3, 3, 512, 512], [512], 1, 1, name='conv5')
    net = max_pool(net, 2, 2, 2, 2, name='pool5')

    # Classifier head: flatten -> (FC + ReLU + dropout) x2 -> linear FC
    net = tf.reshape(net, [-1, 16 * 16 * 512])
    net = tf.nn.relu(fc(net, [16 * 16 * 512, 1024], [1024], name='fc6'))
    net = dropout(net, KEEP_PROB)
    net = tf.nn.relu(fc(net, [1024, 2048], [2048], name='fc7'))
    net = dropout(net, KEEP_PROB)
    return fc(net, [2048, NUM_CLASSES], [NUM_CLASSES], name='fc8')

def conv(x, kernel_size, bias_size, stride_y, stride_x, name):
    """SAME-padded 2-D convolution with bias add and ReLU activation.

    Args:
        x: input 4-D tensor [batch, height, width, channels].
        kernel_size: filter shape [h, w, in_channels, out_channels].
        bias_size: bias shape [out_channels].
        stride_y, stride_x: vertical/horizontal strides.
        name: variable scope name (must match checkpoint names on restore).

    Returns:
        Activated output tensor of the convolution.
    """
    with tf.variable_scope(name) as scope:
        weights = tf.get_variable('weights',  
                                  shape=kernel_size,  
                                  dtype=tf.float32,  
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32)) 
        biases = tf.get_variable('biases',  
                                 shape=bias_size,  
                                 dtype=tf.float32,  
                                 initializer=tf.constant_initializer(0.1)) 
        conv = tf.nn.conv2d(x, weights, strides=[1, stride_y, stride_x, 1], padding='SAME')  
        pre_activation = tf.nn.bias_add(conv, biases)
        # Bug fix: the layer comments in AlexNet() advertise "Conv (w ReLu)"
        # but no activation was applied, so consecutive conv layers collapsed
        # into a linear stack. Apply the ReLU here.
        return tf.nn.relu(pre_activation, name=scope.name)


def fc(x, kernel_size, bias_size, name):
    """Fully connected layer: x @ weights + biases, no activation applied."""
    with tf.variable_scope(name) as scope:
        w = tf.get_variable(
            'weights',
            shape=kernel_size,
            dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        b = tf.get_variable(
            'biases',
            shape=bias_size,
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.1))
        # Linear transform only; callers add their own nonlinearity.
        return tf.add(tf.matmul(x, w), b, name=scope.name)


def max_pool(x, filter_height, filter_width, stride_y, stride_x, name, padding='SAME'):
    """Max-pool over filter_height x filter_width windows with the given strides."""
    window = [1, filter_height, filter_width, 1]
    steps = [1, stride_y, stride_x, 1]
    return tf.nn.max_pool(x, ksize=window, strides=steps, padding=padding, name=name)


def lrn(x, radius, alpha, beta, name, bias=1.0):
    """Local response normalization across channels (AlexNet-style)."""
    return tf.nn.local_response_normalization(
        x,
        depth_radius=radius,
        bias=bias,
        alpha=alpha,
        beta=beta,
        name=name,
    )

def dropout(x, keep_prob):
    """Dropout layer: keeps each unit with probability keep_prob."""
    return tf.nn.dropout(x, keep_prob=keep_prob)
再配合之前的 tfrecord 文件,迭代 10000 次后,训练准确率可达 90% 以上,但测试准确率约为 80%。需要注意的是,在用 restore 读取模型时,网络各层变量的命名必须与保存时保持一致,否则会报错。
查看评论

TensorFlow 卷积神经网络之猫狗识别

这份数据集来源于Kaggle,数据集有12500只猫和12500只狗。在这里简单介绍下整体思路 1. 处理数据 2. 设计神经网络 3. 进行训练测试 1. 数据处理 将图片数据处理为...
  • u012373815
  • u012373815
  • 2017-12-11 00:21:54
  • 3219

基于TensorFlow的Cats vs. Dogs(猫狗大战)实现和详解(1)

2017.5.29   猫狗大战,tensorflow实现,超详细讲解
  • qq_16137569
  • qq_16137569
  • 2017-05-29 15:33:48
  • 14308

tensorflow实现猫狗识别

  • 2017年10月10日 10:22
  • 9KB
  • 下载

TensorFlow 卷积神经网络之使用训练好的模型识别猫狗图片

此系列的上一篇是 训练猫狗图片识别模型TensorFlow 卷积神经网络之猫狗识别,这片文章是介绍,训练后的模型应该如何使用。 本文逻辑: 1. 我从网上下载了十几张猫和狗的图片,用于检验我们训练...
  • u012373815
  • u012373815
  • 2018-01-31 21:14:13
  • 1342

Tensorflow 实现AlexNet 猫狗分类

原文地址:AlexNet 关于文章的理解,网上有很多博客可以参考,这里只给出LRN(local response normalization)的一篇回答,其中形象的解释了LRN,如下图所示。地址链接...
  • u014484783
  • u014484783
  • 2018-03-20 10:22:51
  • 75

tensorflow 猫狗识别大战_视频

  • 2018年03月28日 16:24
  • 515.03MB
  • 下载

Tensorrflow实战猫狗识别

机器学习进阶实战30天旨在帮助大家快速掌握机器学习中的经典算法与实战策略,课程从实战角度出发,通过实际的案例来讲解算法的应用与提升。进阶篇引入当下热门的计算机视觉与自然语言处理,结合tensorflow框架展开实战分析,帮助同学们快速使用机器学习中的高阶算法进行实际应用。
  • 2018年02月06日 12:03

使用卷积神经网络区分猫和狗.md

卷积神经网络(Convolutionnal Neural Network, CNN)是一种前馈神经网络,它的人工神经单元可以响应一部分范围内的的周围单元,对于大型图像有出色的表现。 卷积神经网络通常由...
  • Darfie
  • Darfie
  • 2017-02-26 20:43:20
  • 1191

tensorflow 实战 猫狗大战(一)训练自己的数据

先记录下代码,再慢慢分析 input_data.py #coding=utf-8 import tensorflow as tf import numpy as np import os # fi...
  • hjxu2016
  • hjxu2016
  • 2017-07-18 15:05:44
  • 5725

对kaggle比赛的猫狗数据集的tensorflow训练

说明:本文参考文献很多,有前辈的博客,还有大神的视频课程。我在这儿就不一一列举了。注重一些我自己的理解,供参考。建立上图工程,数据集可以去Kaggle官网下载,建议有时间的去听听视频课程,讲的很详细,...
  • u014264373
  • u014264373
  • 2018-03-16 15:47:48
  • 87
    个人资料
    持之以恒
    等级:
    访问量: 1467
    积分: 216
    排名: 37万+
    文章分类
    文章存档