Simple TensorFlow Test Applications

Contents of this chapter:

1. tf.nn.conv2d() -- how the convolution function computes its output

2. TensorFlow -- linear regression

3. TensorFlow -- logistic regression

4. Shallow neural network (sigmoid)

5. Shallow neural network + an additional hidden layer (ReLU)

6. Shallow neural network + CNN convolution

7. Training a convolutional neural network + saving the model

8. Training a convolutional neural network + restoring the model


1. Verifying how the convolution function computes its output

"""
函数格式:tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu =  Noen, name = None)

1.input:指需要做卷积的输入图像

要求是一个4维的Tensor,shape为[batch, in_height, in_width, in_channels]:
batch:训练时一个batch的图片数量
in_height:输入图像的高度
in_width:输入图像的宽度
in_channels:输入图像的通道数,灰度图像则为1,彩色图像则为3

2.filter:CNN卷积网络中的卷积核

要求是一个Tensor,类型和input类型相同,shape为[filter_height, filter_width, in_channels, out_channels]:
filter_height:卷积核的高度
filter_width:卷积核的宽度
in_channels:图像的通道数,input的in_channels相同
out_channels:卷积核的个数

3.strides:不同维度上的步长,是一个一维向量,[ 1, strides, strides, 1],
第一维和最后一维的数字要求必须是1。因为卷积层的步长只对矩阵的长和宽有效。

4.padding:string类型,表示卷积的形式,是否考虑边界,值为“SAME”和“VALID”
"SAME"是考虑边界,不足的时候用填充周围(即input为多大,output就多大),"VALID"则不考虑边界。

5.use_cudnn_on_gpu: bool类型,是否使用cudnn加速,默认为true。

"""

# # case 1: a simple convolution experiment
# # Input: 1 image of size 3*3 with 5 channels; kernel: 1*1, one kernel; strides [1,1,1,1].
# # The result is one 3*3 feature map, i.e. the output for this single image is a tensor of shape [1, 3, 3, 1].
# 
# # -*- coding:utf-8 -*-
# import tensorflow as tf
 
# Input = tf.Variable(tf.random_normal([1, 3, 3, 5]))
# Filter = tf.Variable(tf.random_normal([1, 1, 5, 1]))
# conv1 = tf.nn.conv2d(Input, Filter, strides=[1, 1, 1, 1], padding='VALID')  
 
# with tf.Session() as sess:
#     # initialize the variables
#     op_init = tf.global_variables_initializer()
#     sess.run(op_init)
#     a=sess.run(conv1)
#     print(a.shape)




# # case 2: the difference between "VALID" and "SAME"
# # Input: 1 image of size 3*3 with 5 channels; kernel: 2*2, one kernel; strides [1,1,1,1].
# # With padding "VALID" the result is a 2*2 feature map; with "SAME" it is 3*3.
# # 
# # -*- coding:utf-8 -*-
# import tensorflow as tf
 
# Input = tf.Variable(tf.random_normal([1, 3, 3, 5]))
# Filter = tf.Variable(tf.random_normal([2, 2, 5, 1]))
 
# conv2 = tf.nn.conv2d(Input, Filter, strides=[1, 1, 1, 1], padding='VALID')   # no border padding
# conv3 = tf.nn.conv2d(Input, Filter, strides=[1, 1, 1, 1], padding='SAME')    # border padded automatically
# with tf.Session() as sess:
#     # initialize the variables
#     op_init = tf.global_variables_initializer()
#     sess.run(op_init)
#     print("without border padding:", sess.run(conv2).shape)
#     print("with border padding:", sess.run(conv3).shape)




# # case 3: multiple input images
# # Input: 2 images of size 3*3 with 5 channels; kernel: 2*2, one kernel; strides [1,1,1,1].
# # With padding "SAME" the result is two 3*3 feature maps; the final output is a tensor of shape [2, 3, 3, 1].

# # -*- coding:utf-8 -*-
# import tensorflow as tf
 
# Input = tf.Variable(tf.random_normal([2, 3, 3, 5]))
# Filter = tf.Variable(tf.random_normal([2, 2, 5, 1]))
 
# conv4 = tf.nn.conv2d(Input, Filter, strides=[1, 1, 1, 1], padding='SAME')
 
# with tf.Session() as sess:
#     # initialize the variables
#     op_init = tf.global_variables_initializer()
#     sess.run(op_init)
#     print(sess.run(conv4).shape)





# case 4: multiple input images and multiple kernels
# Input: 4 images of size 3*3 with 5 channels; kernels: 2*2, 4 kernels; strides [1,1,1,1].
# With padding "SAME" each image yields four 3*3 feature maps; the final output is a tensor of shape [4, 3, 3, 4].

# -*- coding:utf-8 -*-
import tensorflow as tf
 
Input = tf.Variable(tf.random_normal([4, 3, 3, 5]))
Filter = tf.Variable(tf.random_normal([2, 2, 5, 4]))
 
conv5 = tf.nn.conv2d(Input, Filter, strides=[1, 1, 1, 1], padding='SAME')
 
with tf.Session() as sess:
    # initialize the variables
    op_init = tf.global_variables_initializer()
    sess.run(op_init)
    print(sess.run(conv5).shape)
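
The cases above only check output shapes. To actually verify the values that tf.nn.conv2d computes, one can compare it against a manual sum of element-wise products over each 2*2 patch; a minimal sketch under the same setup as case 2 (VALID padding, stride 1, names chosen for illustration):

import numpy as np
import tensorflow as tf

img = np.random.randn(1, 3, 3, 5).astype(np.float32)     # 1 image, 3*3, 5 channels
kernel = np.random.randn(2, 2, 5, 1).astype(np.float32)  # one 2*2 kernel

conv = tf.nn.conv2d(tf.constant(img), tf.constant(kernel),
                    strides=[1, 1, 1, 1], padding='VALID')

# manual computation: each output pixel is the sum of an element-wise product
manual = np.zeros((1, 2, 2, 1), dtype=np.float32)
for i in range(2):
    for j in range(2):
        manual[0, i, j, 0] = np.sum(img[0, i:i+2, j:j+2, :] * kernel[:, :, :, 0])

with tf.Session() as sess:
    print(np.allclose(sess.run(conv), manual, atol=1e-5))  # should print True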

2. Linear regression with TensorFlow

import tensorflow as tf
import numpy as np

trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33 # create random values scattered around a straight line

X = tf.placeholder("float")
Y = tf.placeholder("float")

def model(X, w):
    return tf.multiply(X, w) # linear evaluation X*w, very simple

w = tf.Variable(0.0, name="weights")
y_model = model(X, w)

cost = tf.square(Y - y_model) # use the squared error as the optimization objective

train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost) # gradient descent optimization

# create a Session and get to work!
with tf.Session() as sess:
    # the global variables must be initialized first, as TensorFlow requires
    tf.global_variables_initializer().run()

    for i in range(100):
        for (x, y) in zip(trX, trY):
            sess.run(train_op, feed_dict={X: x, Y: y})
    print(sess.run(w))
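
Since trY was generated as 2 * trX plus a small amount of Gaussian noise, the value of w printed at the end should converge to something close to 2.0.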

3. Logistic regression with TensorFlow

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w):
    return tf.matmul(X, w) # the model is still just a matrix multiplication

# load the MNIST dataset
mnist = input_data.read_data_sets("mnist/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

X = tf.placeholder("float", [None, 784])   # 784 dimensions, image size 28*28
Y = tf.placeholder("float", [None, 10])    # 10 labels, digits 0-9

w = init_weights([784, 10])
py_x = model(X, w)   # compute predictions with the model

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)) # compute the loss
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct optimizer
predict_op = tf.argmax(py_x, 1)   # index of the largest value in each row of py_x

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    for i in range(10):
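        # iterate over the training set in mini-batches of 128 examples: (0, 128), (128, 256), ...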
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})

        print(i, "精确度:",np.mean(np.argmax(teY, axis=1) == sess.run(predict_op, feed_dict={X: teX})))

4. A shallow neural network (sigmoid)

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# randomly initialize the weights of all connections
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

# compared with the previous model this adds just one hidden layer with a sigmoid activation; accuracy improves
def model(X, w_h, w_o):
    h = tf.nn.sigmoid(tf.matmul(X, w_h))    
    return tf.matmul(h, w_o)

mnist = input_data.read_data_sets("mnist/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])

w_h = init_weights([784, 625])
w_o = init_weights([625, 10])

py_x = model(X, w_h, w_o)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)) # compute the loss
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct an optimizer
predict_op = tf.argmax(py_x, 1)

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
        print(i, np.mean(np.argmax(teY, axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX})))

5. Shallow neural network + an additional hidden layer (ReLU)

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))


# adds two hidden layers with ReLU activations (plus dropout); accuracy improves further
def model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden):
    X = tf.nn.dropout(X, p_keep_input)
    h = tf.nn.relu(tf.matmul(X, w_h))

    h = tf.nn.dropout(h, p_keep_hidden)
    h2 = tf.nn.relu(tf.matmul(h, w_h2))

    h2 = tf.nn.dropout(h2, p_keep_hidden)

    return tf.matmul(h2, w_o)

mnist = input_data.read_data_sets("mnist/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])

w_h = init_weights([784, 625])
w_h2 = init_weights([625, 625])
w_o = init_weights([625, 10])

p_keep_input = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_input: 0.8, p_keep_hidden: 0.5})
        print(i, np.mean(np.argmax(teY, axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX,
                                                         p_keep_input: 1.0,
                                                         p_keep_hidden: 1.0})))
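
Note that during training the dropout keep probabilities are fed as 0.8 for the input layer and 0.5 for the hidden layers, while at test time both are fed as 1.0 so that no units are dropped.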

6. Shallow neural network + CNN convolution

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

batch_size = 128
test_size = 256

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))


# uses a convolutional neural network (CNN); accuracy improves further
def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    l1a = tf.nn.relu(tf.nn.conv2d(X, w,                       # l1a shape=(?, 28, 28, 32)
                        strides=[1, 1, 1, 1], padding='SAME'))
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],              # l1 shape=(?, 14, 14, 32)
                        strides=[1, 2, 2, 1], padding='SAME')
    l1 = tf.nn.dropout(l1, p_keep_conv)

    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2,                     # l2a shape=(?, 14, 14, 64)
                        strides=[1, 1, 1, 1], padding='SAME'))
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],              # l2 shape=(?, 7, 7, 64)
                        strides=[1, 2, 2, 1], padding='SAME')
    l2 = tf.nn.dropout(l2, p_keep_conv)

    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3,                     # l3a shape=(?, 7, 7, 128)
                        strides=[1, 1, 1, 1], padding='SAME'))
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],              # l3 shape=(?, 4, 4, 128)
                        strides=[1, 2, 2, 1], padding='SAME')
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])    # reshape to (?, 2048)
    l3 = tf.nn.dropout(l3, p_keep_conv)

    l4 = tf.nn.relu(tf.matmul(l3, w4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)

    pyx = tf.matmul(l4, w_o)
    return pyx

mnist = input_data.read_data_sets("mnist/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX = trX.reshape(-1, 28, 28, 1)  # 28x28x1 input img
teX = teX.reshape(-1, 28, 28, 1)  # 28x28x1 input img

X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])

w = init_weights([3, 3, 1, 32])       # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64])     # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128])    # 3x3x64 conv, 128 outputs
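# after three 2x2 max pools (stride 2, SAME padding) the 28x28 maps shrink 28 -> 14 -> 7 -> 4, hence 128 * 4 * 4 flattened inputs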
w4 = init_weights([128 * 4 * 4, 625]) # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, 10])         # FC 625 inputs, 10 outputs (labels)

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(16):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX)+1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX)) # Get A Test Batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))

7. Training the convolutional neural network + saving the model

import tensorflow as tf
from  tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os

# suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

"""------------------加载数据---------------------"""
# 载入数据
mnist = input_data.read_data_sets("mnist/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
# reshape the data so it can be fed into the convolutional layers
trX = trX.reshape(-1, 28, 28, 1)  # -1 means the number of images is inferred; 1 means a single channel
teX = teX.reshape(-1, 28, 28, 1)

"""------------------构建模型---------------------"""
# 定义输入输出的数据容器
X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])


# define and initialize the weights and the dropout parameters
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))


w1 = init_weights([3, 3, 1, 32])        # 3x3 kernels, 32 feature maps
w2 = init_weights([3, 3, 32, 64])       # 3x3 kernels, 64 feature maps
w3 = init_weights([3, 3, 64, 128])      # 3x3 kernels, 128 feature maps
w4 = init_weights([128 * 4 * 4, 625])   # from the convolutional layers to the fully connected layer
w_o = init_weights([625, 10])           # from the fully connected layer to the output layer

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")


# define the model
def create_model(X, w1, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    # first convolution + pooling block
    conv1 = tf.nn.conv2d(X, w1, strides=[1, 1, 1, 1], padding='SAME')
    conv1_out = tf.nn.relu(conv1)
    pool1 = tf.nn.max_pool(conv1_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool1_out = tf.nn.dropout(pool1, p_keep_conv)

    # second convolution + pooling block
    conv2 = tf.nn.conv2d(pool1_out, w2, strides=[1, 1, 1, 1], padding='SAME')
    conv2_out = tf.nn.relu(conv2)
    pool2 = tf.nn.max_pool(conv2_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool2_out = tf.nn.dropout(pool2, p_keep_conv)

    # third convolution + pooling block
    conv3 = tf.nn.conv2d(pool2_out, w3, strides=[1, 1, 1, 1], padding='SAME')
    conv3_out = tf.nn.relu(conv3)
    pool3 = tf.nn.max_pool(conv3_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool3 = tf.reshape(pool3, [-1, w4.get_shape().as_list()[0]])  # flatten into a 1-D vector
    pool3_out = tf.nn.dropout(pool3, p_keep_conv)

    # fully connected layer
    fully_layer = tf.matmul(pool3_out, w4)
    fully_layer_out = tf.nn.relu(fully_layer)
    fully_layer_out = tf.nn.dropout(fully_layer_out, p_keep_hidden)

    # output layer
    out = tf.matmul(fully_layer_out, w_o)

    return out


model = create_model(X, w1, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

# define the cost function, the training op and the prediction op
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(model, 1,name="predict")

# create a Saver
saver=tf.train.Saver()

# define the checkpoint directory
ckpt_dir="./ckpt_dir2"
if not os.path.exists(ckpt_dir):
    os.makedirs(ckpt_dir)

"""------------------训练模型---------------------"""
train_batch_size = 128  # 训练集的mini_batch_size=128
test_batch_size = 256   # 测试集中调用的batch_size=256
epoches = 5  # 迭代周期
with tf.Session() as sess:
    """-------训练模型--------"""
    # 初始化所有变量
    tf.global_variables_initializer().run()

    # training loop
    for i in range(epoches):
        train_batch = zip(range(0, len(trX), train_batch_size),range(train_batch_size, len(trX) + 1, train_batch_size))
        for start, end in train_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],p_keep_conv: 0.8, p_keep_hidden: 0.5})

        # after each epoch, test on test_batch_size images drawn at random from the test set
        test_indices = np.arange(len(teX))  # an array [0, 1, ..., len(teX)-1]
        np.random.shuffle(test_indices)     # shuffle the array
        test_indices = test_indices[0:test_batch_size]

        # get the predictions for these test_batch_size test images
        predict_result = sess.run(predict_op, feed_dict={X: teX[test_indices],p_keep_conv: 1.0,p_keep_hidden: 1.0})
        
        # get the true labels
        true_labels = np.argmax(teY[test_indices], axis=1)

        # compute the accuracy
        accuracy = np.mean(true_labels == predict_result)
        print("epoch", i, ":", accuracy)

        # save the model
        saver.save(sess,ckpt_dir+"/model.ckpt",global_step=i)
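
Because saver.save is called with global_step=i, one checkpoint is written per epoch (model.ckpt-0 through model.ckpt-4). Instead of hard-coding a step when restoring, as the next section does, the newest checkpoint could be looked up with tf.train.latest_checkpoint; a minimal sketch reusing saver and ckpt_dir from the code above:

with tf.Session() as sess:
    latest = tf.train.latest_checkpoint(ckpt_dir)   # e.g. "./ckpt_dir2/model.ckpt-4"
    if latest is not None:
        saver.restore(sess, latest)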

8. Training the convolutional neural network + restoring the model

# restoring a model saved with Saver

import tensorflow as tf
import numpy as np
import os
import cv2

# suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

"""------------------构建模型---------------------"""
# 定义输入输出的数据容器
X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])


# define and initialize the weights and the dropout parameters
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

w1 = init_weights([3, 3, 1, 32])        # 3x3 kernels, 32 feature maps
w2 = init_weights([3, 3, 32, 64])       # 3x3 kernels, 64 feature maps
w3 = init_weights([3, 3, 64, 128])      # 3x3 kernels, 128 feature maps
w4 = init_weights([128 * 4 * 4, 625])   # from the convolutional layers to the fully connected layer
w_o = init_weights([625, 10])           # from the fully connected layer to the output layer

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")

# define the model
def create_model(X, w1, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    # first convolution + pooling block
    conv1 = tf.nn.conv2d(X, w1, strides=[1, 1, 1, 1], padding='SAME')
    conv1_out = tf.nn.relu(conv1)
    pool1 = tf.nn.max_pool(conv1_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool1_out = tf.nn.dropout(pool1, p_keep_conv)

    # second convolution + pooling block
    conv2 = tf.nn.conv2d(pool1_out, w2, strides=[1, 1, 1, 1], padding='SAME')
    conv2_out = tf.nn.relu(conv2)
    pool2 = tf.nn.max_pool(conv2_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool2_out = tf.nn.dropout(pool2, p_keep_conv)

    # third convolution + pooling block
    conv3 = tf.nn.conv2d(pool2_out, w3, strides=[1, 1, 1, 1], padding='SAME')
    conv3_out = tf.nn.relu(conv3)
    pool3 = tf.nn.max_pool(conv3_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool3 = tf.reshape(pool3, [-1, w4.get_shape().as_list()[0]])  # flatten into a 1-D vector
    pool3_out = tf.nn.dropout(pool3, p_keep_conv)

    # fully connected layer
    fully_layer = tf.matmul(pool3_out, w4)
    fully_layer_out = tf.nn.relu(fully_layer)
    fully_layer_out = tf.nn.dropout(fully_layer_out, p_keep_hidden)

    # output layer
    out = tf.matmul(fully_layer_out, w_o)

    return out


model = create_model(X, w1, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

# define the cost function, the training op and the prediction op
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(model, 1,name="predict")

# create a Saver
saver=tf.train.Saver()

# define the checkpoint directory
ckpt_dir="./ckpt_dir2"

with tf.Session() as sess:

    """-----加载模型,用导入的图片进行测试--------"""
    # 载入图片
    src = cv2.imread('img/5.png')
    cv2.imshow("Pending pictures", src) # image to be classified

    # convert the image to a 28*28 grayscale image
    src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    dst = cv2.resize(src, (28, 28), interpolation=cv2.INTER_CUBIC)

    # invert the grayscale image and reshape it into a (1, 28, 28, 1) array the network can take as input
    picture = np.zeros((28, 28))
    for i in range(0, 28):
        for j in range(0, 28):
            picture[i][j] = (255 - dst[i][j])
    picture = picture.reshape(1, 28, 28, 1)

    # load the model
    saver.restore(sess,ckpt_dir+"/model.ckpt-1")

    # make the prediction
    predict_result = sess.run(predict_op, feed_dict={X: picture,p_keep_conv: 1.0,p_keep_hidden: 1.0})

    print("你导入的图片是:",predict_result[0])
    cv2.waitKey(0)

    # close the session (redundant inside a with block, but harmless)
    sess.close()
