tensorflow(6) - Program Flow

Overall workflow
# Define the structure of the neural network and its forward-pass output.
# Define the loss function and choose a backpropagation optimization algorithm.
# Create a session (tf.Session) and repeatedly run the backpropagation optimizer on the training data.
# -------------------------------------------------------------
# Define the network inputs
x = tf.placeholder(tf.float32, shape=[None, IMG_W, IMG_H, 3], name='x-input')
y_ = tf.placeholder(tf.float32, shape=[None, N_CLASSES], name='y-input')

# Define the network structure and the optimization algorithm
cross_entropy = ...
accuracy = ...
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)
# Train the network
for i in range(5000):
    # Train on a batch of batch_size samples
    sess.run(train_step, feed_dict={x: x_train, y_: y_train})
    if i % 1000 == 0:
        # Every so often, compute the cross-entropy over the full dataset and print it
        total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
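
As a self-contained illustration of the three steps, here is a minimal runnable sketch. The one-layer network, the random data, and constants such as LEARNING_RATE are invented for the example, and the elided loss is filled in with a standard softmax cross-entropy:

import numpy as np
import tensorflow as tf

IMG_W, IMG_H, N_CLASSES, LEARNING_RATE = 8, 8, 4, 0.01

# Step 1: network structure and forward-pass output (a single linear layer)
x = tf.placeholder(tf.float32, shape=[None, IMG_W, IMG_H, 3], name='x-input')
y_ = tf.placeholder(tf.float32, shape=[None, N_CLASSES], name='y-input')
flat = tf.reshape(x, [-1, IMG_W * IMG_H * 3])
W = tf.Variable(tf.random_normal([IMG_W * IMG_H * 3, N_CLASSES], stddev=0.1))
b = tf.Variable(tf.zeros([N_CLASSES]))
logits = tf.matmul(flat, W) + b

# Step 2: loss function and backpropagation optimizer
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)

# Step 3: run the optimizer repeatedly in a session
# (random data stands in for a real training set)
X = np.random.rand(32, IMG_W, IMG_H, 3).astype(np.float32)
Y = np.eye(N_CLASSES)[np.random.randint(N_CLASSES, size=32)].astype(np.float32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(5000):
        sess.run(train_step, feed_dict={x: X, y_: Y})
        if i % 1000 == 0:
            print(i, sess.run(cross_entropy, feed_dict={x: X, y_: Y}))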

Validation and testing

validate_acc = sess.run(accuracy, feed_dict=validate_feed)
test_acc = sess.run(accuracy, feed_dict=test_feed)
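
validate_feed and test_feed are ordinary feed dictionaries keyed by the input placeholders; assuming held-out arrays with names like validation_images (the names are illustrative, not from the original), they might be built as:

# hypothetical held-out arrays with shapes matching the placeholders
validate_feed = {x: validation_images, y_: validation_labels}
test_feed = {x: test_images, y_: test_labels}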
Building the network model

1. Define the required convolution, pooling, and normalization ops

# Convolution op
def conv2d(name, x, W, b, strides=1):
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x, name=name)  # ReLU activation

# Max-pooling op
def maxpool2d(name, x, k=2):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME', name=name)

# Local response normalization (LRN) op
def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0,
                     beta=0.75, name=name)
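
As a quick sanity check of these helpers (a sketch with made-up demo variables): with stride 1 and 'SAME' padding the convolution preserves the spatial size, the k=2 max pool halves it, and LRN leaves the shape unchanged.

x_demo = tf.placeholder(tf.float32, [None, 28, 28, 1])
W_demo = tf.Variable(tf.random_normal([11, 11, 1, 96]))
b_demo = tf.Variable(tf.random_normal([96]))

c = conv2d('conv_demo', x_demo, W_demo, b_demo)  # (?, 28, 28, 96)
p = maxpool2d('pool_demo', c, k=2)               # (?, 14, 14, 96)
n = norm('norm_demo', p)                         # (?, 14, 14, 96)
print(c.get_shape(), p.get_shape(), n.get_shape())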

2. Define all of the network parameters

weights = {
    'wc1': tf.Variable(tf.random_normal([11, 11, 1, 96])),
    'wc2': tf.Variable(tf.random_normal([5, 5, 96, 256])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 256, 384])),
    'wc4': tf.Variable(tf.random_normal([3, 3, 384, 384])),
    'wc5': tf.Variable(tf.random_normal([3, 3, 384, 256])),
    'wd1': tf.Variable(tf.random_normal([4*4*256, 4096])),
    'wd2': tf.Variable(tf.random_normal([4096, 4096])),
    'out': tf.Variable(tf.random_normal([4096, n_classes]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([96])),
    'bc2': tf.Variable(tf.random_normal([256])),
    'bc3': tf.Variable(tf.random_normal([384])),
    'bc4': tf.Variable(tf.random_normal([384])),
    'bc5': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([4096])),
    'bd2': tf.Variable(tf.random_normal([4096])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
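
The 4*4*256 input size of 'wd1' is determined by the pooling chain in the model below: the convolutions use stride 1 with 'SAME' padding and LRN preserves shape, so only the three max-pool stages (pool1, pool2, pool5) shrink the 28x28 input, each computing ceil(size/2). A quick check in plain Python:

import math

size = 28
for pool_name in ('pool1', 'pool2', 'pool5'):
    size = math.ceil(size / 2)  # 'SAME' max pooling with stride 2
    print(pool_name, '->', size)
# 28 -> 14 -> 7 -> 4, so the flattened volume is 4*4*256 = 4096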

3. Define the AlexNet model

def alex_net(x, weights, biases, dropout):
    # Reshape the input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    # First convolutional layer
    conv1 = conv2d('conv1', x, weights['wc1'], biases['bc1'])
    # Downsample (max pooling)
    pool1 = maxpool2d('pool1', conv1, k=2)
    # Normalize
    norm1 = norm('norm1', pool1, lsize=4)
    # Second convolutional layer
    conv2 = conv2d('conv2', norm1, weights['wc2'], biases['bc2'])
    # Max pooling (downsampling)
    pool2 = maxpool2d('pool2', conv2, k=2)
    # Normalize
    norm2 = norm('norm2', pool2, lsize=4)
    # Third convolutional layer
    # (no pooling here, so the flattened size matches the 4*4*256 rows of 'wd1')
    conv3 = conv2d('conv3', norm2, weights['wc3'], biases['bc3'])
    # Normalize
    norm3 = norm('norm3', conv3, lsize=4)
    # Fourth convolutional layer
    conv4 = conv2d('conv4', norm3, weights['wc4'], biases['bc4'])
    # Fifth convolutional layer
    conv5 = conv2d('conv5', conv4, weights['wc5'], biases['bc5'])
    # Downsample
    pool5 = maxpool2d('pool5', conv5, k=2)
    # Normalize
    norm5 = norm('norm5', pool5, lsize=4)
    # Fully connected layer 1
    fc1 = tf.reshape(norm5, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    # Fully connected layer 2
    fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
    fc2 = tf.nn.relu(fc2)
    # Dropout
    fc2 = tf.nn.dropout(fc2, dropout)
    # Output layer
    out = tf.add(tf.matmul(fc2, weights['out']), biases['out'])
    return out
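
To verify the wiring, one can build the graph once and inspect the shape of the logits (a quick sketch, assuming n_classes = 10 was set when the parameter dictionaries above were created):

x = tf.placeholder(tf.float32, [None, 784])    # flattened 28x28 images
keep_prob = tf.placeholder(tf.float32)         # dropout keep probability
logits = alex_net(x, weights, biases, keep_prob)
print(logits.get_shape())                      # expect (?, n_classes)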

4. Build the model, define the loss function and optimizer, and build the evaluation ops:

# Build the model
pred = alex_net(x, weights, biases, keep_prob)
# Define the loss function and the optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluation ops
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
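
correct_pred compares the index of the largest logit with the index of the one-hot label, and accuracy is just the mean of those matches; a tiny hand-worked illustration of the same arithmetic in NumPy:

import numpy as np

logits_demo = np.array([[2.0, 0.1, 0.3],    # argmax = 0
                        [0.2, 0.1, 3.0]])   # argmax = 2
labels_demo = np.array([[1., 0., 0.],       # true class 0 -> correct
                        [0., 1., 0.]])      # true class 1 -> wrong
matches = np.argmax(logits_demo, 1) == np.argmax(labels_demo, 1)
print(matches.mean())  # 0.5, i.e. the accuracy op would report 50%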

5. Train and evaluate the model

# Initialize the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
        if step % display_step == 0:
            # Compute and print the loss and accuracy on the current batch
            loss, acc = sess.run([cost, accuracy],
                                 feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Accuracy on the first 256 test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                        y: mnist.test.labels[:256],
                                        keep_prob: 1.}))