Implementing a convolutional neural network in Python for handwritten digit recognition

Tools: TensorFlow, OpenCV
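Note on the data layout (an assumption read off from how the labels are built below): D:/train_pic is expected to hold fifty 28x28 grayscale digit images, ten per class for five classes, and the order in which os.walk returns the files must match the label order (the first ten files are class 0, the next ten class 1, and so on). If your raw images are a different size, a minimal preprocessing sketch like the one below can resize them first; the folder D:/raw_pic is a hypothetical source directory, not something the original script uses.

import os
import cv2

SRC_DIR = "D:/raw_pic"    # hypothetical folder with the raw digit images
DST_DIR = "D:/train_pic"  # folder the training script reads from

os.makedirs(DST_DIR, exist_ok=True)
for fname in os.listdir(SRC_DIR):
    img = cv2.imread(os.path.join(SRC_DIR, fname), 0)  # read as grayscale
    if img is None:
        continue  # skip files that are not images
    img = cv2.resize(img, (28, 28))  # match the 28x28 input expected by the network
    cv2.imwrite(os.path.join(DST_DIR, fname), img)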

import cv2
import numpy as np
import os
import tensorflow as tf
np.set_printoptions(suppress=True)
def get_train_feature():
    # 50 training images, ten per class for 5 classes; build the one-hot labels
    labels = np.zeros((50, 5))
    labels[0:10, 0] = 1
    labels[10:20, 1] = 1
    labels[20:30, 2] = 1
    labels[30:40, 3] = 1
    labels[40:50, 4] = 1
    d = os.walk("D:/train_pic")
    feature = []  # all pictures
    names = []    # all file paths
    for root, dirs, files in d:
        for file in files:
            names.append("D:/train_pic/" + str(file))
    for name in names:
        feat = cv2.imread(name, 0)  # read as grayscale
        feat = feat / 255           # normalize pixel values to [0, 1]
        feature.append(feat)
    return np.reshape(feature, [-1, 28, 28, 1]), labels
def cnn_train(x, y):

    # create the TensorFlow session
    sess = tf.Session()
    # placeholders for the input images and the one-hot labels
    xs = tf.placeholder(tf.float32, [None, 28, 28, 1], name="xs")
    ys = tf.placeholder(tf.float32, [None, 5], name="ys")
    # first convolution: 32 kernels of size 5x5
    w_cnn1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
    # bias
    b_cnn1 = tf.Variable(tf.truncated_normal([32], stddev=0.1))
    # first convolutional layer; SAME padding keeps the image size unchanged
    # ReLU guards against vanishing gradients (one-sided rectification of the neurons)
    cnn1 = tf.nn.relu(tf.nn.conv2d(xs, w_cnn1, strides=[1, 1, 1, 1], padding="SAME") + b_cnn1)
    # first pooling layer; image size becomes 14x14
    pool1 = tf.nn.max_pool(cnn1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    # second convolution: 64 kernels of size 5x5
    w_cnn2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
    b_cnn2 = tf.Variable(tf.truncated_normal([64], stddev=0.1))
    # second convolutional layer
    cnn2 = tf.nn.relu(tf.nn.conv2d(pool1, w_cnn2, strides=[1, 1, 1, 1], padding="SAME") + b_cnn2)
    # second pooling layer; image size becomes 7x7
    pool2 = tf.nn.max_pool(cnn2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    # flatten the feature maps before the fully connected layers
    pool_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    # fully connected layer w1
    w_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 128], stddev=0.1))
    b_fc1 = tf.Variable(tf.truncated_normal([128], stddev=0.1))
    # layer 1
    fc1 = tf.matmul(pool_flat, w_fc1) + b_fc1
    # fully connected layer w2
    w_fc2 = tf.Variable(tf.truncated_normal([128, 5], stddev=0.1))
    b_fc2 = tf.Variable(tf.truncated_normal([5], stddev=0.1))
    # layer 2: class scores
    fc2 = tf.matmul(fc1, w_fc2) + b_fc2
    # softmax turns the scores into class probabilities
    sf_fc2 = tf.nn.softmax(fc2, name="gailv")
    # cross-entropy loss (probabilities clipped to avoid log(0))
    cross_entropy = ys * tf.log(tf.clip_by_value(sf_fc2, 1e-8, 1.0))
    loss = -tf.reduce_mean(cross_entropy)
    # train with plain gradient descent
    train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)
    save = tf.train.Saver()
    # simple full-batch training loop; prints the loss at every step
    for i in range(1000):
        print(sess.run(loss, feed_dict={xs: x, ys: y}))
        sess.run(train, feed_dict={xs: x, ys: y})
    # persist the trained graph and weights
    save.save(sess, "D:/my_cnn_model.model")
def train_model():
    feature, labels = get_train_feature()  # load images and labels with OpenCV
    cnn_train(feature, labels)
def test():
    # load the images to classify (the training folder is reused here)
    d = os.walk("D:/train_pic")
    feature = []  # all pictures
    names = []    # all file paths
    for root, dirs, files in d:
        for file in files:
            names.append("D:/train_pic/" + str(file))
    for name in names:
        feat = cv2.imread(name, 0)  # read as grayscale
        feat = feat / 255           # normalize pixel values to [0, 1]
        feature.append(feat)
    return feature
def test2():
    feature = test()
    feature = np.reshape(feature, [-1, 28, 28, 1])
    with tf.Session() as sess:
        # restore the graph and the weights saved by cnn_train
        saver = tf.train.import_meta_graph('D:/my_cnn_model.model.meta')
        saver.restore(sess, 'D:/my_cnn_model.model')
        graph = tf.get_default_graph()
        probs = graph.get_tensor_by_name("gailv:0")
        xs = graph.get_tensor_by_name("xs:0")
        print(sess.run(probs, feed_dict={xs: feature}))
if __name__ == "__main__":
    test2()
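Remember that train_model() has to be run once before test2(), otherwise the checkpoint files under D:/ will not exist. test2() prints the raw softmax probabilities, one row per image; to turn that into an actual digit prediction, take the argmax over each row. A minimal sketch, assuming probs is the array returned by sess.run inside test2() (the example values below are invented for illustration):

import numpy as np

# Hypothetical output: one row of class probabilities per image (values made up).
probs = np.array([[0.01, 0.02, 0.90, 0.05, 0.02],
                  [0.70, 0.10, 0.10, 0.05, 0.05]])
predicted = np.argmax(probs, axis=1)  # most likely class index for each image
print(predicted)  # -> [2 0]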