Saving a Cat-vs-Dog CNN Model

This article walks through training a cat-vs-dog classifier with a convolutional neural network (CNN) and explains how to save the trained model for later use. A worked example demonstrates the key steps of data preprocessing, model construction, training, and model saving.
import numpy as np
import pickle
import cv2
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt

# Load the pre-processed cat/dog dataset from a pickle file
train_data = {b'data': [], b'labels': []}
with open("D:/TensorFlow_gpu/animal.pickle", mode='rb') as file:
    data = pickle.load(file, encoding='bytes')
    train_data[b'data'] += list(data['train_images'])
    train_data[b'labels'] += list(data['train_label'])
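# The post does not show how animal.pickle was produced. A minimal sketch of one
# way it could be built (an assumption, not the author's actual preprocessing):
# read each image with cv2, resize it to 224x224, and label cats 0 / dogs 1.
#
#   import glob, pickle, cv2
#   images, labels = [], []
#   for path in glob.glob("train/*.jpg"):             # hypothetical folder layout
#       img = cv2.resize(cv2.imread(path), (224, 224))
#       images.append(img)
#       labels.append(0 if "cat" in path else 1)      # 0 = cat, 1 = dog
#   with open("animal.pickle", "wb") as f:
#       pickle.dump({'train_images': images, 'train_label': labels}, f)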

train_epochs = 102       # number of training epochs
batch_size = 40          # mini-batch size
display_step = 10        # interval (in steps) for printing training results
learning_rate = 0.00001  # learning rate
drop_prob = 0.2          # dropout: fraction of units to drop
fch_nodes = 256          # number of neurons in the fully connected hidden layer

# Weight initialization: truncated normal distribution
def weight_init(shape):
    weights = tf.truncated_normal(shape, mean=0.001, stddev=0.1, dtype=tf.float32)
    return tf.Variable(weights)


# Bias initialization
def biases_init(shape):
    biases = tf.random_normal(shape, dtype=tf.float32)
    return tf.Variable(biases)

# Pick a mini-batch: returns the start/end indices of a contiguous slice
# beginning at a random offset
def get_random_batchdata(n_samples, batchsize):
    start_index = np.random.randint(0, n_samples - batchsize)
    return (start_index, start_index + batchsize)
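# A hedged alternative (not in the original post): sample a true random subset of
# indices rather than a contiguous window, so any image can appear in any batch.
def get_shuffled_batch_indices(n_samples, batchsize):
    return np.random.choice(n_samples, size=batchsize, replace=False)
# Illustrative usage: batch_x = x_train[get_shuffled_batch_indices(n_samples, batch_size)]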

# Xavier/Glorot uniform initialization for the fully connected weights
def xavier_init(layer1, layer2, constant=1):
    Min = -constant * np.sqrt(6.0 / (layer1 + layer2))
    Max = constant * np.sqrt(6.0 / (layer1 + layer2))
    return tf.Variable(tf.random_uniform((layer1, layer2), minval=Min, maxval=Max, dtype=tf.float32))


# 2-D convolution with stride 1 and SAME padding (spatial size is preserved)
def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


# 2x2 max pooling with stride 2 (halves the spatial dimensions)
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Input placeholders: 224x224 RGB images and one-hot labels for the 2 classes
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
y = tf.placeholder(tf.float32, [None, 2])
x_image = x  # input is already in NHWC image format, no reshape needed

# Conv block 1
w_conv1 = weight_init([3, 3, 3, 48])  # 3x3 kernel, 3 input channels, 48 filters
b_conv1 = biases_init([48])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)  # output 224x224x48
h_pool1 = max_pool_2x2(h_conv1)                           # pooled to 112x112x48
print(w_conv1)
# Conv block 2
W_conv2 = weight_init([3, 3, 48, 96])
b_conv2 = biases_init([96])
h_conv2 = tf.nn.tanh(conv2d(h_pool1, W_conv2) + b_conv2)  # output 112x112x96
h_pool2 = max_pool_2x2(h_conv2)                           # pooled to 56x56x96
# Conv block 3
W_conv3 = weight_init([3, 3, 96, 128])
b_conv3 = biases_init([128])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)  # output 56x56x128
h_pool3 = max_pool_2x2(h_conv3)                           # pooled to 28x28x128
# Conv block 4
W_conv4 = weight_init([3, 3, 128, 128])
b_conv4 = biases_init([128])
h_conv4 = tf.nn.tanh(conv2d(h_pool3, W_conv4) + b_conv4)  # output 28x28x128
h_pool4 = max_pool_2x2(h_conv4)                           # pooled to 14x14x128
# Conv block 5
W_conv5 = weight_init([3, 3, 128, 256])
b_conv5 = biases_init([256])
h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)  # output 14x14x256
h_pool5 = max_pool_2x2(h_conv5)                           # pooled to 7x7x256

# Flatten the final feature map for the fully connected layers
h_pool5_flat = tf.reshape(h_pool5, [-1, 7 * 7 * 256])

# Fully connected hidden layer
w_fc1 = xavier_init(7 * 7 * 256, fch_nodes)
b_fc1 = biases_init([fch_nodes])
h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, w_fc1) + b_fc1)

# keep_prob is the fraction of units to KEEP, so pass 1 - drop_prob
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=1 - drop_prob)

# Weights between the hidden layer and the output layer
w_fc2 = xavier_init(fch_nodes, 2)
b_fc2 = biases_init([2])

# Logits (pre-activation output), computed from the dropout-regularized features
y_ = tf.add(tf.matmul(h_fc1_drop, w_fc2), b_fc2)

# Softmax-activated output (class probabilities)
y_out = tf.nn.softmax(y_)


# Register the variables in named collections so they can be retrieved
# with tf.get_collection after the model is restored
tf.add_to_collection('v', w_conv1)
tf.add_to_collection('v', W_conv2)
tf.add_to_collection('v', W_conv3)
tf.add_to_collection('v', W_conv4)
tf.add_to_collection('v', W_conv5)
tf.add_to_collection('b', b_conv1)
tf.add_to_collection('b', b_conv2)
tf.add_to_collection('b', b_conv3)
tf.add_to_collection('b', b_conv4)
tf.add_to_collection('b', b_conv5)

tf.add_to_collection('fw', w_fc1)
tf.add_to_collection('fw', w_fc2)
tf.add_to_collection('fb', b_fc1)
tf.add_to_collection('fb', b_fc2)


# Cross-entropy loss computed from the softmax output
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_out), reduction_indices=[1]))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
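# A hedged alternative (not in the original post): taking log(softmax) can underflow
# when a predicted probability reaches 0; TF1's fused op below computes the same loss
# directly from the raw logits y_ in a numerically stable way.
#cross_entropy = tf.reduce_mean(
#    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=y_))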

# Accuracy
# Each sample's prediction is a length-2 vector (cat / dog)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_out, 1))
# tf.cast converts the boolean values to floats
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
n_samples = int(1800)                        # total number of training samples
total_batches = int(n_samples / batch_size)

#x_train = np.array(train_data[b'data']) / 255  # optional: scale pixels to [0, 1]
x_train = np.array(train_data[b'data'])
y_train = np.array(pd.get_dummies(train_data[b'labels']))  # one-hot encode the labels

with tf.Session() as sess:
    saver = tf.train.Saver()
    sess.run(init)
    Cost = []
    Accuracy = []
    for i in range(train_epochs):
        for j in range(total_batches):  # iterate over the mini-batches of this epoch
            start_index, end_index = get_random_batchdata(n_samples, batch_size)
            batch_x = x_train[start_index: end_index]
            batch_y = y_train[start_index: end_index]
            _, cost, accu = sess.run([optimizer, cross_entropy, accuracy], feed_dict={x: batch_x, y: batch_y})
            Cost.append(cost)
            Accuracy.append(accu)
        v = tf.get_collection('v')
        b = tf.get_collection('b')
        fw = tf.get_collection('fw')
        fb = tf.get_collection('fb')
        print("step %d, training accuracy %g, loss %g" % (i, accu, cost))
        # Save a checkpoint after every epoch; files are written as ./my_test_model-<epoch>
        saver.save(sess, './my_test_model', global_step=i)
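
The point of saving the model is to reuse it later. Below is a minimal restore-and-predict sketch, assuming the checkpoint files (my_test_model-*.meta/.index/.data) are in the current directory and that a preprocessed 224x224x3 image is available; the tensor names are TensorFlow's defaults (the placeholders and softmax were not given explicit names above), so treat them as illustrative assumptions rather than the post's exact code.

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the graph from the saved .meta file and load the latest weights
    saver = tf.train.import_meta_graph('./my_test_model-101.meta')  # exact suffix depends on the saved global_step
    saver.restore(sess, tf.train.latest_checkpoint('./'))

    graph = tf.get_default_graph()
    x_in = graph.get_tensor_by_name('Placeholder:0')  # image placeholder (default name assumed)
    probs = graph.get_tensor_by_name('Softmax:0')      # softmax output (default name assumed)

    test_image = np.zeros((1, 224, 224, 3), dtype=np.float32)  # stand-in for a real preprocessed image
    prediction = sess.run(probs, feed_dict={x_in: test_image})
    print(prediction)  # class probabilities, in the one-hot column order produced by pd.get_dummies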