tensorflow-AlexNet

The overall flow (three conv + pool + LRN + dropout blocks followed by three fully connected layers) is easy enough to pick up from the two figures in the posts referenced below.

References:

https://blog.csdn.net/weixin_43624538/article/details/83988998

https://blog.csdn.net/qq_28123095/article/details/79787782

The code is as follows:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
class AlexNet():
    def __init__(self):
        self.learning_rate = 0.001
        self.training_iters = 10000  # stop after roughly 10,000 training samples
        self.batch_size = 64  # size of each mini-batch
        self.display_step = 20  # print intermediate results every 20 steps

        # Network parameters
        self.n_input = 784  # input dimensionality (28 * 28 flattened)
        self.n_classes = 10  # number of label classes
        self.dropout = 0.8  # rate passed to tf.layers.dropout (fraction of units dropped)

    def conv2d(self, x, filter, k_size, stride=[1, 1], padding='same', dilation=[1, 1], activation=tf.nn.relu,
               scope='conv2d'):  # dilation: dilation rate of the convolution
        return tf.layers.conv2d(inputs=x, filters=filter, kernel_size=k_size,
                                strides=stride, dilation_rate=dilation, padding=padding,
                                name=scope, activation=activation)

#    def max_pool2d(self, x, pool_size=[2, 2], stride=[2, 2], scope='max_pool2d'):
#        return tf.layers.max_pooling2d(inputs=x, pool_size=pool_size, strides=stride, name=scope)
    def max_pool(self,name, l_input, k):
        return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)

    def dropoutx(self, x, d_rate=0.5):
        # tf.layers.dropout defaults to training=False, so dropout is only applied when training=True is passed
        return tf.layers.dropout(inputs=x, rate=d_rate)

    def norm(self, x, l_size, bias=1.0, alpha=0.001 / 9.0, beta=0.75, scope='norm'):
        return tf.nn.lrn(x, l_size, bias=bias, alpha=alpha, beta=beta, name=scope)

    def set_net(self, x):
        check_points = {}
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        net = self.conv2d(x, filter=64, k_size=[3, 3], stride=[1,1], scope='conv1_1')
        #net = self.max_pool2d(net, pool_size=[2, 2], stride=[2,2], scope='pool1')
        net = self.max_pool('pool1', net, k=2)
        net = self.norm(net, l_size=4, scope='norm_1')
        net = self.dropoutx(net, d_rate=self.dropout)
        check_points['block1'] = net

        net = self.conv2d(net, filter=128, k_size=[3, 3], stride=[1,1], scope='conv2_1')
       # net = self.max_pool2d(net, pool_size=[2, 2], stride=[2,2], scope='pool2')
        net = self.max_pool('pool2', net, k=2)
        net = self.norm(net, l_size=4, scope='norm_2')
        net = self.dropoutx(net, d_rate=self.dropout)
        check_points['block2'] = net

        net = self.conv2d(net, filter=256, k_size=[3, 3], stride=[1,1], scope='conv5_1')
        #net = self.max_pool2d(net, pool_size=[2, 2], stride=[2,2], scope='pool3')
        net = self.max_pool('pool3', net, k=2)
        net = self.norm(net, l_size=4, scope='norm_3')
        net = self.dropoutx(net, d_rate=self.dropout)
        check_points['block3'] = net

        net = tf.reshape(net, [-1, 4 * 4 * 256])
        net = tf.layers.dense(net, units=1024, activation=tf.nn.relu, use_bias=True)
        net = tf.layers.dense(net, units=1024, activation=tf.nn.relu, use_bias=True)
        net = tf.layers.dense(net, units=10, activation=None, use_bias=True)  # raw logits; softmax is applied in the loss
        check_points['block4'] = net
        return net

    def alex_prediction(self, mnist, scope='alexnet'):
        with tf.name_scope('inputs'):
            x = tf.placeholder(tf.float32, [None, self.n_input])
            y = tf.placeholder(tf.float32, [None, self.n_classes])

        pred = self.set_net(x)  # pred holds the raw logits, not yet normalized
        a = tf.nn.softmax(pred)  # a is the softmax-normalized prediction
        # Define the loss and the training step
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))  # cross-entropy loss
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(cost)  # minimize the loss
        # Evaluate the network
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # Initialize all variables
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            step = 1
            writer = tf.summary.FileWriter("E://TensorBoard//test", sess.graph)
            # Keep training until reach max iterations
            while step * self.batch_size < self.training_iters:  # train until the sample budget is used up; no convergence check
                batch_xs, batch_ys = mnist.train.next_batch(self.batch_size)
                # fetch the next mini-batch of training data
                sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
                if step % self.display_step == 0:  # each step consumes one batch of 64, so this prints every 64 * 20 = 1280 samples
                    # compute training accuracy on the current batch
                    acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
                    # compute the loss on the current batch
                    loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})
                    print("Iter " + str(step * self.batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
                        loss) + ", Training Accuracy = " + "{:.5f}".format(acc))
                step += 1
            print("Optimization Finished!")
            # Compute test accuracy
            print("Testing Accuracy:", sess.run(accuracy,
                                                feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256],
                                                           }))  # evaluate on the first 256 test images
            print("Testing Result:", sess.run(a, feed_dict={x: mnist.test.images[63:64], y: mnist.test.labels[63:64],
                                                            }))  # slicing starts at 0 and excludes the right endpoint

        writer.close()
if __name__ == '__main__':
    alex = AlexNet()
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    alex.alex_prediction(mnist)
    #{'a': <tf.Tensor 'dropout/mul:0' shape=(?, 14, 14, 64) dtype=float32>, 'b': <tf.Tensor 'dropout_1/mul:0' shape=(?, 7, 7, 128) dtype=float32>, 'c': <tf.Tensor 'dropout_2/mul:0' shape=(?, 4, 4, 256) dtype=float32>, 'd': <tf.Tensor 'fc2:0' shape=(?, 1024) dtype=float32>, 'e': <tf.Tensor 'add_2:0' shape=(?, 10) dtype=float32>}
#{'block1': <tf.Tensor 'norm_1:0' shape=(?, 14, 14, 64) dtype=float32>, 'block2': <tf.Tensor 'norm_2:0' shape=(?, 7, 7, 128) dtype=float32>, 'block3': <tf.Tensor 'norm_3:0' shape=(?, 3, 3, 256) dtype=float32>, 'block4': <tf.Tensor 'fc2:0' shape=(?, 1024) dtype=float32>, 'block5': <tf.Tensor 'add_2:0' shape=(?, 10) dtype=float32>}

When tf.layers.max_pooling2d is used (its padding defaults to 'valid'), the third block ends up with the (?, 3, 3, 256) shape shown in the second dict above, so the flattened features no longer match the 4 * 4 * 256 dense layer and the shape of out no longer matches the shape of y.
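
A minimal sketch of the padding difference, assuming TensorFlow 1.x; the zero tensor below just stands in for the 7x7x256 feature map that exists after the second pooling stage:

import tensorflow as tf

feat = tf.zeros([1, 7, 7, 256])  # stand-in for the feature map after the second pool (28 -> 14 -> 7)

# tf.nn.max_pool with padding='SAME': output size is ceil(7 / 2) = 4
same_pool = tf.nn.max_pool(feat, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
print(same_pool.shape)   # (1, 4, 4, 256), which flattens to 4 * 4 * 256

# tf.layers.max_pooling2d defaults to padding='valid': output size is floor((7 - 2) / 2) + 1 = 3
valid_pool = tf.layers.max_pooling2d(feat, pool_size=[2, 2], strides=[2, 2])
print(valid_pool.shape)  # (1, 3, 3, 256), i.e. 3 * 3 * 256, which no longer matches the dense layer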

Test code:

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
weights = {
    'wd1': tf.Variable(tf.random_normal([4 * 4 * 256, 1024])),
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([10]))
}

def conv2d( x, filter, k_size, stride=[1, 1], padding='same', dilation=[1, 1], activation=tf.nn.relu,
           scope='conv2d'):  # dilation: dilation rate of the convolution
    return tf.layers.conv2d(inputs=x, filters=filter, kernel_size=k_size,
                            strides=stride, dilation_rate=dilation, padding=padding,
                            name=scope, activation=activation)

def max_pool( name, l_input, k):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)


def dropoutx( x, d_rate=0.5):
    return tf.layers.dropout(inputs=x, rate=d_rate)


def norm( x, l_size, bias=1.0, alpha=0.001 / 9.0, beta=0.75, scope='norm'):
    return tf.nn.lrn(x, l_size, bias=bias, alpha=alpha, beta=beta, name=scope)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder('float',[None,784])
net = tf.reshape(x, shape=[-1, 28, 28, 1])

net = conv2d(net, filter=64, k_size=[3, 3], stride=[1, 1], scope='conv1_1')
# net = max_pool2d(net, pool_size=[2, 2], stride=[2,2], scope='pool1')
net = max_pool('pool1', net, k=2)
net_1 = norm(net, l_size=4, scope='norm_1')


net = conv2d(net_1, filter=128, k_size=[3, 3], stride=[1, 1], scope='conv2_1')
# net = max_pool2d(net, pool_size=[2, 2], stride=[2,2], scope='pool2')
net = max_pool('pool2', net, k=2)
net_2 = norm(net, l_size=4, scope='norm_2')



net = conv2d(net_2, filter=256, k_size=[3, 3], stride=[1, 1], scope='conv5_1')
# net = max_pool2d(net, pool_size=[2, 2], stride=[2,2], scope='pool3')
net = max_pool('pool3', net, k=3)  # note: k=3 here gives a (?, 3, 3, 256) feature map instead of (?, 4, 4, 256)
net_3 = norm(net, l_size=4, scope='norm_3')


net_final_1 = tf.reshape(net_3, [-1, weights['wd1'].get_shape().as_list()[0]])
net_final_2 = tf.nn.relu(tf.matmul(net_final_1,weights['wd1']) + biases['bd1'], name='fc1')
net_final_3 = tf.nn.relu(tf.matmul(net_final_2,weights['wd2']) + biases['bd2'], name='fc2')  # Relu activation

out = tf.matmul(net_final_3, weights['out']) + biases['out']

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_xs, batch_ys = mnist.train.next_batch(64)

    net1 = sess.run(net_1,feed_dict={x:batch_xs})
    print(net1.shape)
    net2 = sess.run(net_2,feed_dict={x:batch_xs})
    print(net2.shape)
    net3 = sess.run(net_3,feed_dict={x:batch_xs})
    print(net3.shape)
    result_1 = sess.run(net_final_1,feed_dict={x:batch_xs})
    print(result_1.shape)
    result_2 = sess.run(net_final_2,feed_dict={x:batch_xs})
    print(result_2.shape)
    result_3 = sess.run(net_final_3,feed_dict={x:batch_xs})
    print(result_3.shape)
    out = sess.run(out,feed_dict={x:batch_xs})
    print(out.shape)
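
For reference, with a batch of 64 and the k=3 pooling above, the printed shapes should be (64, 14, 14, 64), (64, 7, 7, 128) and (64, 3, 3, 256) for the three normed blocks, then (36, 4096), (36, 1024), (36, 1024) and (36, 10) for the fully connected part: reshaping 64 * 3 * 3 * 256 elements into rows of 4 * 4 * 256 silently changes the batch dimension from 64 to 36, which is exactly the out vs. y mismatch described above.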

I'm just starting to write things with tf, so this is still pretty rough...
