A 2-conv-layer, 2-fully-connected-layer network for the MNIST dataset, implemented in TensorFlow

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
"""
h=w 图片尺寸
f=卷积核
p=padding 边界填补 ‘SAME’补充
s=strides 每一次走的步长
(h-f+2*p)/s + 1
"""
# 10 classes; input images are 28*28 = 784 pixels, fed in flattened
n_input=784
n_output=10
# Load the MNIST data
mnist = input_data.read_data_sets('data/', one_hot=True)

weights={
    # [3,3,1,64]: 3*3 kernel (h*w), 1 input channel, 64 feature maps
    'wc1':tf.Variable(tf.random_normal([3,3,1,64],stddev=0.1)),
    'wc2':tf.Variable(tf.random_normal([3,3,64,128],stddev=0.1)),
    # After two 2x2 max-pools, 28x28 -> 7x7 with 128 channels
    'wd1':tf.Variable(tf.random_normal([7*7*128,1024],stddev=0.1)),
    'wd2':tf.Variable(tf.random_normal([1024,n_output],stddev=0.1))
}
biases={
    'bc1':tf.Variable(tf.random_normal([64],stddev=0.1)),
    'bc2':tf.Variable(tf.random_normal([128],stddev=0.1)),
    'bd1':tf.Variable(tf.random_normal([1024],stddev=0.1)),
    'bd2':tf.Variable(tf.random_normal([n_output],stddev=0.1))
}
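# (Not in the original post: a rough parameter count as an arithmetic sanity
# check. wc1: 3*3*1*64 = 576; wc2: 3*3*64*128 = 73,728;
# wd1: 7*7*128*1024 = 6,422,528; wd2: 1024*10 = 10,240 -- the first fully
# connected layer dominates the model size.)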

def conv_basic(input, w, b, keepratio):
    # Reshape the flattened 784-vector back to NHWC: [batch, 28, 28, 1]
    input_r = tf.reshape(input,shape=[-1,28,28,1])

    # Conv block 1: 3x3 conv -> ReLU -> 2x2 max-pool (28 -> 14) -> dropout
    conv1 = tf.nn.conv2d(input_r,w['wc1'],strides=[1,1,1,1],padding='SAME')
    conv1 = tf.nn.relu(tf.nn.bias_add(conv1,b['bc1']))
    pool1 = tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
    pool_dr1 = tf.nn.dropout(pool1,keepratio)

    # Conv block 2: 3x3 conv -> ReLU -> 2x2 max-pool (14 -> 7) -> dropout
    conv2 = tf.nn.conv2d(pool_dr1,w['wc2'],strides=[1,1,1,1],padding='SAME')
    conv2 = tf.nn.relu(tf.nn.bias_add(conv2,b['bc2']))
    pool2 = tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
    pool_dr2 = tf.nn.dropout(pool2,keepratio)

    # Fully connected layers: flatten the pooled features to [batch, 7*7*128]
    dense1 = tf.reshape(pool_dr2,[-1,w['wd1'].get_shape().as_list()[0]])
    fc1 = tf.nn.relu(tf.add(tf.matmul(dense1,w['wd1']),b['bd1']))
    fc_dr1 = tf.nn.dropout(fc1,keepratio)

    # Raw logits; softmax is applied inside the loss below
    _out = tf.add(tf.matmul(fc_dr1,w['wd2']),b['bd2'])

    out ={
        'input_r':input_r,
        'conv1':conv1,
        'pool1':pool1, 'pool_dr1': pool_dr1,
        'conv2': conv2,
        'pool2': pool2, 'pool_dr2': pool_dr2,
        'dense1':dense1,
        'fc1':fc1,  'fc_dr1':fc_dr1,
        'out': _out
    }
    return out
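# (Illustration, not in the original post: because conv_basic returns every
# intermediate tensor, shapes are easy to inspect once the graph is built
# below, e.g. _pred['pool2'].get_shape() should report (?, 7, 7, 128) and
# _pred['dense1'] should be (?, 6272).)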

x = tf.placeholder(tf.float32,[None,n_input])
y = tf.placeholder(tf.float32,[None,n_output])
keepratio = tf.placeholder(tf.float32)

_pred = conv_basic(x, weights, biases, keepratio)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred['out'],labels=y))
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

_corr = tf.equal(tf.argmax(_pred['out'],1),tf.argmax(y,1))
accr = tf.reduce_mean(tf.cast(_corr,tf.float32))

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

training_epochs = 15
batch_size = 16
display_step=1
for epoch in range(training_epochs):
    avg_cost=0.
    # Only 10 batches per epoch here to keep the demo fast; a full pass would
    # use mnist.train.num_examples // batch_size
    total_batch = 10
    for i in range(total_batch):
        # Fetch the next batch_size training examples
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optm,feed_dict={x:batch_xs,y:batch_ys,keepratio:0.7})
        avg_cost += sess.run(loss,feed_dict={x:batch_xs,y:batch_ys,keepratio:1.})/total_batch
    if epoch % display_step==0:
        print('Epoch: %03d/%03d loss: %.9f'%(epoch,training_epochs,avg_cost))
        # Evaluate training accuracy with dropout disabled (keepratio=1.)
        train_acc = sess.run(accr,feed_dict={x:batch_xs,y:batch_ys,keepratio:1.})
        print('Training accuracy: %.3f'%train_acc)
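
# Sketch, not in the original post: after training, evaluate on the held-out
# test split (assuming the standard mnist.test attribute from input_data),
# again with dropout disabled. Feeding all 10k test images at once can be
# memory hungry, so a slice keeps this cheap.
test_acc = sess.run(accr,feed_dict={x:mnist.test.images[:1000],
                                    y:mnist.test.labels[:1000],keepratio:1.})
print('Test accuracy: %.3f'%test_acc)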


 
