程序为转载的,如下:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset (downloaded to ./MNIST-data on first run);
# one_hot=True encodes each label as a 10-element one-hot vector.
mnist = input_data.read_data_sets("MNIST-data", one_hot=True)

# Number of images per training batch.
batch_size = 100
# Total number of full batches per epoch (floor division drops any remainder).
n_batch = mnist.train.num_examples // batch_size
# Weight initialization helper.
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape.

    Values are drawn from a truncated normal distribution (stddev=0.1),
    a common small random initialization for conv/dense kernels.
    """
    init = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init)
# Bias initialization helper.
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, filled with 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
# Convolution helper.
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, 'SAME' (zero) padding.

    x: input tensor of shape [batch, in_height, in_width, in_channels]
    W: filter tensor of shape [filter_height, filter_width, in_channels, out_channels]
    """
    # [batch, height, width, channels]; the batch and channel strides must be 1.
    stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=stride, padding='SAME')
# Pooling helper.
def max_pool_2x2(x):
    """2x2 max pooling with stride 2, halving each spatial dimension."""
    # ksize/strides layout is [batch, height, width, channels]; the same
    # 2x2 window is used for both the pool size and the step.
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Define the two input placeholders for the computation graph.
x = tf.placeholder(tf.float32,[None,784])# flattened 28*28 images; None = variable batch size
y = tf.placeholder(tf.float32,[None,10])# one-hot labels for digits 0-9