TensorFlow tutorial: implementing AlexNet and experimenting on oxflower17

If you like this, you can download the code from my GitHub: https://github.com/hacker-wei/tensorflow-tutorial

The recognition accuracy on this dataset is around 72% (I only trained for 2,000 epochs). The full code is attached below, for learning reference only:

# _*_encoding=utf-8_*_

import tensorflow as tf
import tflearn.datasets.oxflower17 as oxflower17

batch_size = 32
all_epoch = 2000   # total number of training epochs
train_num = 1000   # first 1000 images used for training
test_num = 360     # remaining 360 images used for testing


# The dataset contains 1360 images across 17 flower classes
ox17_image_width = 224
ox17_image_height = 224
ox17_image_depth = 3
ox17_num_labels = 17  # 17 classes

# Use the first 1000 images as the training set and the remaining 360 as the test set
train_dataset_, train_labels_ = oxflower17.load_data(one_hot=True)
train_dataset_ox17, train_labels_ox17 = train_dataset_[:train_num], train_labels_[:train_num]
test_dataset_ox17, test_labels_ox17 = train_dataset_[train_num:], train_labels_[train_num:]

print('Training set', train_dataset_ox17.shape, train_labels_ox17.shape)
print('Test set', test_dataset_ox17.shape, test_labels_ox17.shape)

images = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])
y = tf.placeholder(dtype=tf.float32, shape=[None, 17])
drop_prob = tf.placeholder(dtype=tf.float32, shape=[])
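# drop_prob is fed as the dropout *keep* probability: 0.75 during training, 1.0 at test time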
# Print a tensor's name and its output shape
def print_activation(t):
    print(t.op.name, ' ', t.get_shape().as_list())


def full_weight(shape):
    return tf.Variable(tf.random_normal(shape=shape, stddev=1e-4, dtype=tf.float32))

def full_bias(shape):
    return tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=shape))

# Define the network
def Net(images, drop):
    parameters = []
    l2_loss = tf.constant(0.1)
    # layer 1: the first two conv layers apply LRN (local response normalization)
    with tf.name_scope("conv1") as scopes:
        weight = tf.Variable(tf.truncated_normal([11, 11, 3, 64], stddev=0.1, dtype=tf.float32), name="weights")
        conv = tf.nn.conv2d(images, weight, strides=[1, 4, 4, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[64]), name="biases")
        bias = tf.add(conv, biases)
        conv1_output = tf.nn.relu(bias)
        print_activation(conv1_output)
    lrn1 = tf.nn.lrn(conv1_output, depth_radius=4, bias=1.0, alpha=0.001 / 9, beta=0.75, name="lrn1")
    pool1 = tf.nn.max_pool(lrn1, [1, 3, 3, 1], [1, 2, 2, 1], padding="SAME", name="pool1")
    print_activation(pool1)
    parameters += [weight, biases]
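    # At this point the feature map is 28x28x64 (224 / 4 from conv1, / 2 from pool1)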

    # layer 2
    with tf.name_scope("conv2") as scopes:
        weight = tf.Variable(tf.truncated_normal([5, 5, 64, 192], stddev=0.1, dtype=tf.float32), name="weights")
        conv = tf.nn.conv2d(pool1, weight, [1, 1, 1, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[192]), name="biases")
        bias = tf.add(conv, biases)
        conv2_output = tf.nn.relu(bias)
        print_activation(conv2_output)
    lrn2 = tf.nn.lrn(conv2_output, depth_radius=4, bias=1.0, alpha=0.001 / 9, beta=0.75, name="lrn2")
    pool2 = tf.nn.max_pool(lrn2, [1, 3, 3, 1], [1, 2, 2, 1], padding="SAME", name="pool2")
    print_activation(pool2)
    parameters += [weight, biases]

    # layer 3
    with tf.name_scope("conv3") as scopes:
        weight = tf.Variable(tf.truncated_normal([3, 3, 192, 384], stddev=0.1, dtype=tf.float32), name="weights")
        conv = tf.nn.conv2d(pool2, weight, [1, 1, 1, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[384]), name="biases")
        bias = tf.add(conv, biases)
        conv3_output = tf.nn.relu(bias)
        print_activation(conv3_output)
    parameters += [weight, biases]

    # layer 4
    with tf.name_scope("conv4") as scopes:
        weight = tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=0.1, dtype=tf.float32), name="weights")
        conv = tf.nn.conv2d(conv3_output, weight, [1, 1, 1, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256]), name="biases")
        bias = tf.add(conv, biases)
        conv4_output = tf.nn.relu(bias)
        print_activation(conv4_output)
    parameters += [weight, biases]

    # layer 5
    with tf.name_scope("conv5") as scopes:
        weight = tf.Variable(tf.truncated_normal([3, 3, 256, 256], stddev=0.1, dtype=tf.float32), name="weights")
        conv = tf.nn.conv2d(conv4_output, weight, [1, 1, 1, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256]), name="biases")
        bias = tf.add(conv, biases)
        conv5_output = tf.nn.relu(bias)
        print_activation(conv5_output)
    parameters += [weight, biases]
    pool5 = tf.nn.max_pool(conv5_output, [1, 3, 3, 1], [1, 2, 2, 1], padding="SAME", name="pool5")
    print_activation(pool5)
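    # Spatial size trace with SAME padding: 224 -> conv1 stride 4 -> 56 -> pool1 -> 28
    # -> pool2 -> 14 -> pool5 -> 7, so pool5 flattens to 7*7*256 features below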

    with tf.name_scope("full_layer") as scopes:
        full_input = tf.reshape(pool5,[-1,7*7*256])
        W1 = full_weight([7*7*256,4096])
        b1 = full_bias([4096])
        l2_loss += tf.nn.l2_loss(W1)
        l2_loss += tf.nn.l2_loss(b1)
        full_mid1 = tf.nn.relu(tf.matmul(full_input,W1) + b1)
        full_drop1 = tf.nn.dropout(full_mid1,keep_prob=drop)

        W2 = full_weight([4096,4096])
        b2 = full_bias([4096])
        l2_loss += tf.nn.l2_loss(W2)
        l2_loss += tf.nn.l2_loss(b2)
        full_mid2 = tf.nn.relu(tf.matmul(full_drop1,W2)+b2)
        full_drop2 = tf.nn.dropout(full_mid2,keep_prob=drop)


        W3 = full_weight([4096, 17])
        b3 = full_bias([17])
        l2_loss += tf.nn.l2_loss(W3)
        l2_loss += tf.nn.l2_loss(b3)
        output = tf.matmul(full_drop2,W3) + b3

    return output,l2_loss



out, l2_loss = Net(images, drop_prob)
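# NOTE: l2_loss is accumulated over the fully connected weights inside Net() and
# returned here, but it is never added to the training loss below; see the
# regularization sketch at the end of the post for one way to use it.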

cross_entry = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=out))
optimizer = tf.train.AdamOptimizer(0.00001).minimize(cross_entry)


correct_prediction = tf.equal(tf.argmax(out, 1), tf.argmax(y, 1))
# Calculate accuracy as the mean of correct predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()

if __name__ == "__main__":
    with tf.Session() as sess:
        sess.run(init)

        # Training loop: iterate over the training set in mini-batches
        num = int(train_dataset_ox17.shape[0] / batch_size)
        for epoch in range(all_epoch):
            ave_cost = 0
            for i in range(num):
                # wrap around the training set with a modulo index
                start = (i * batch_size) % train_dataset_ox17.shape[0]
                end = min(start + batch_size, train_dataset_ox17.shape[0])
                cost, _ = sess.run([cross_entry, optimizer],
                                   feed_dict={images: train_dataset_ox17[start:end],
                                              y: train_labels_ox17[start:end],
                                              drop_prob: 0.75})
                ave_cost += cost / num
            if (epoch + 1) % 100 == 0:
                print("epoch : %04d" % (epoch + 1), " ", "cost : {:.9f}".format(ave_cost))
        # Evaluate on the test set in batches, averaging the per-batch accuracy
        num1 = int(test_dataset_ox17.shape[0] / batch_size)
        pred = 0.0
        for i in range(num1):
            start = (i * batch_size) % test_dataset_ox17.shape[0]
            end = min(start + batch_size, test_dataset_ox17.shape[0])
            accu = sess.run(accuracy,
                            feed_dict={images: test_dataset_ox17[start:end],
                                       y: test_labels_ox17[start:end],
                                       drop_prob: 1.0})
            pred += accu / num1
        print("accuracy: {:.9f}".format(pred))



That is my complete code. The accuracy reached about 72%; feel free to download it and tune it yourself.
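If you want to actually use the l2_loss value that Net() returns (as written, it has no effect on training), one option is to fold it into the objective as weight decay. Here is a minimal sketch; the weight_decay coefficient is a hypothetical value I picked for illustration, not something tuned on this dataset:

# Hypothetical L2 regularization: penalize the fully connected weights
# collected in l2_loss (the conv weights are not included in it as written)
weight_decay = 5e-4  # illustrative value; tune on a validation split
total_loss = cross_entry + weight_decay * l2_loss
optimizer = tf.train.AdamOptimizer(0.00001).minimize(total_loss)

Note that l2_loss is initialized to tf.constant(0.1) inside Net(), so it carries a constant offset; the offset does not affect the gradients, but initializing it to 0.0 would be cleaner.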
