TensorFlow 2.0 Basics (6): Training CIFAR-100 with VGG13 in TensorFlow 2.0

Training CIFAR-100 with VGG13 in TensorFlow 2.0

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)



# Sequential takes a list, so we build a list for the 13-layer network
# (the "13" counts only the conv and fully-connected layers)
conv_layers = [ # 5 units of conv + max pooling (five units of "2 conv layers + 1 pooling layer")
    # units 1
    layers.Conv2D(64, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # units 2
    layers.Conv2D(128, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # units 3
    layers.Conv2D(256, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # units 4
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # units 5
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')
]

# 4) Load the dataset
# 4.1) Preprocessing
def preprocess(x, y):
    # scale pixel values from [0, 255] to [-1, 1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1
    y = tf.cast(y, dtype=tf.int32)
    return x, y
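
# A quick sanity check of the scaling (a minimal sketch; uncomment to run):
# x_demo, _ = preprocess(tf.constant([[0, 128, 255]], dtype=tf.uint8),
#                        tf.constant([0]))
# print(x_demo)  # 0 -> -1.0, 128 -> ~0.0039, 255 -> 1.0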


# 4.2) Load the data
# In principle tf downloads the dataset automatically, but the archive is over
# 100 MB, so the download tends to fail (the link has changed) or be very slow.
# Download it ahead of time instead (e.g. from Baidu) and drop it into
# "C:\Users\wanfuchun\.keras\datasets" without unpacking; the program unpacks it itself.
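# If the download keeps failing, you can check whether the cached archive is
# already in place before calling load_data() (a minimal sketch; the file name
# 'cifar-100-python.tar.gz' is an assumption about what tf.keras expects):
# import pathlib
# print((pathlib.Path.home() / '.keras' / 'datasets' / 'cifar-100-python.tar.gz').exists())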
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y, axis=1)          # squeeze out the size-1 axis: [b, 1] => [b]
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)


# 5) Build the Dataset pipelines
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(64)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(64)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():

    # Test code:

    # [b, 32, 32, 3] => [b, 1, 1, 512]
    # conv_net = Sequential(conv_layers)
    # conv_net.build(input_shape=[None, 32, 32, 3])
    # x = tf.random.normal([4, 32, 32, 3])
    # out = conv_net(x)
    # print(out.shape)

    # Output:
    # F:\Anaconda3\envs\gpu\python.exe H:/lesson13/cifar_100.py
    # (4, 1, 1, 512)
    #
    # Process finished with exit code 0

    # The conv layers and the fully-connected layers are written as two separate
    # networks. The split exists because a flatten step sits between them; tf does
    # ship a layer for that, we just don't use it here. Keeping the two parts
    # separate makes the structure of the network easier to see (see the commented
    # sketch after fc_net below).

    # 1) Create the convolutional network
    # [b, 32, 32, 3] => [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)

    # 2) Create the fully-connected network
    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None)
    ])
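    # As noted above, the two parts could also be fused into a single Sequential
    # model by inserting a Flatten layer between them (a minimal sketch, not used
    # in this script):
    # net = Sequential(conv_layers + [layers.Flatten(),
    #                                 layers.Dense(256, activation=tf.nn.relu),
    #                                 layers.Dense(128, activation=tf.nn.relu),
    #                                 layers.Dense(100)])
    # net.build(input_shape=[None, 32, 32, 3])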

    # 3) Build both networks with an input_shape to fix the input tensor shapes
    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
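    # conv_net.summary() and fc_net.summary() can be called here to print the
    # per-layer output shapes and parameter counts of the built networks.
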
    # 8) Set up the optimizer
    optimizer = optimizers.Adam(learning_rate=1e-4)

    # 7) The full model has two parts (conv + fully-connected), so its trainable
    # parameters also come in two parts; list concatenation merges them into one list
    variables = conv_net.trainable_variables + fc_net.trainable_variables


    # 6) Data loading is done; now the training loop, with a test pass per epoch.
    for epoch in range(50):

        for step, (x, y) in enumerate(train_db):

            # [b, 32, 32, 3] => [b, 1, 1, 512]
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # flatten, => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            # compute gradients w.r.t. ALL trainable parameters; taking only
            # conv_net.trainable_variables here would leave fc_net untrained,
            # because zip truncates to the shorter list
            grads = tape.gradient(loss, variables)
            # 9) Apply the gradients with the optimizer defined above
            optimizer.apply_gradients(zip(grads, variables))


            # 10) Log progress
            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))

        # 11) Test code: (1) run it after each full pass over the training set, as
        # here; or (2) run it after every 100 steps. Testing more often costs time,
        # but never testing leaves you blind to how the model is doing.
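        # A sketch of option (2), purely illustrative: move the evaluation below
        # into the step loop, guarded by `if step % 100 == 0:`, and it will run
        # every 100 batches instead of once per epoch.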
        total_num = 0
        total_correct = 0
        for x, y in test_db:

            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc', acc)

# Everything is kept inside main() to avoid polluting the global namespace
if __name__ == '__main__':
    main()

F:\Anaconda3\envs\gpu\python.exe H:/lesson13/cifar_100.py
(50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
sample: (64, 32, 32, 3) (64,) tf.Tensor(-1.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)
0 0 loss: 4.604959011077881
0 100 loss: 4.575377464294434
0 200 loss: 4.571146011352539
0 300 loss: 4.502490997314453
0 400 loss: 4.452817440032959
0 500 loss: 4.372075080871582
0 600 loss: 4.370511054992676
0 700 loss: 4.216601371765137
0 acc 0.0756
1 0 loss: 4.230457305908203
1 100 loss: 4.309189796447754
1 200 loss: 4.077847480773926
1 300 loss: 4.09382438659668
1 400 loss: 3.9294934272766113
1 500 loss: 3.70894718170166
1 600 loss: 4.001977920532227
1 700 loss: 3.8171377182006836
1 acc 0.1409
2 0 loss: 3.880859375
2 100 loss: 3.910504102706909
2 200 loss: 3.7650814056396484
2 300 loss: 3.84767746925354
2 400 loss: 3.5932629108428955
2 500 loss: 3.433152675628662
2 600 loss: 3.825737714767456
2 700 loss: 3.3847579956054688
2 acc 0.1797
3 0 loss: 3.600898027420044
3 100 loss: 3.619563341140747
3 200 loss: 3.5924160480499268
3 300 loss: 3.6292669773101807
3 400 loss: 3.436692714691162
3 500 loss: 3.140270709991455
3 600 loss: 3.546370029449463
3 700 loss: 3.1560215950012207
3 acc 0.2169
4 0 loss: 3.354435443878174
4 100 loss: 3.398003101348877
4 200 loss: 3.2819104194641113
4 300 loss: 3.3949060440063477
4 400 loss: 3.1485042572021484
4 500 loss: 2.8271279335021973
4 600 loss: 3.2873096466064453
4 700 loss: 2.9307360649108887
4 acc 0.2507
5 0 loss: 3.1113524436950684
5 100 loss: 3.1754555702209473
5 200 loss: 2.9519708156585693
5 300 loss: 3.1691126823425293
...
40 300 loss: 0.020486893132328987
40 400 loss: 0.15449829399585724
40 500 loss: 0.029347334057092667
40 600 loss: 0.062433935701847076
40 700 loss: 0.11583184450864792
40 acc 0.288
41 0 loss: 0.143940731883049
41 100 loss: 0.07662605494260788
41 200 loss: 0.06125793978571892
41 300 loss: 0.05026158690452576
41 400 loss: 0.03283253312110901
41 500 loss: 0.13406644761562347
41 600 loss: 0.09845204651355743
41 700 loss: 0.06047125160694122
41 acc 0.2943
42 0 loss: 0.012548993341624737
42 100 loss: 0.02632601372897625
42 200 loss: 0.10440327227115631
42 300 loss: 0.08434855937957764
42 400 loss: 0.15985724329948425
42 500 loss: 0.10367662459611893
42 600 loss: 0.04307159036397934
42 700 loss: 0.06772636622190475
42 acc 0.2944
43 0 loss: 0.03724997118115425
43 100 loss: 0.08360186219215393
43 200 loss: 0.030484747141599655
43 300 loss: 0.05947188287973404
43 400 loss: 0.02054227888584137
43 500 loss: 0.05295180156826973
43 600 loss: 0.04020894318819046
43 700 loss: 0.016904769465327263
43 acc 0.2912
44 0 loss: 0.05763118714094162
44 100 loss: 0.009715245105326176
44 200 loss: 0.21532860398292542
44 300 loss: 0.13623373210430145
44 400 loss: 0.3500138521194458
44 500 loss: 0.022981159389019012
44 600 loss: 0.0746481865644455
44 700 loss: 0.13544529676437378
44 acc 0.2931
45 0 loss: 0.0751943364739418
45 100 loss: 0.0914333313703537
45 200 loss: 0.009346842765808105
45 300 loss: 0.034759074449539185
45 400 loss: 0.0769815519452095
45 500 loss: 0.060862474143505096
45 600 loss: 0.09881328046321869
45 700 loss: 0.16600234806537628
45 acc 0.2889
46 0 loss: 0.043441079556941986
46 100 loss: 0.0640772357583046
46 200 loss: 0.1080295592546463
46 300 loss: 0.09161406755447388
46 400 loss: 0.04567646235227585
46 500 loss: 0.056166522204875946
46 600 loss: 0.23812463879585266
46 700 loss: 0.06230071187019348
46 acc 0.2924
47 0 loss: 0.07480883598327637
47 100 loss: 0.09990830719470978
47 200 loss: 0.08148270100355148
47 300 loss: 0.046455755829811096
47 400 loss: 0.14326196908950806
47 500 loss: 0.02140878140926361
47 600 loss: 0.06625264137983322
47 700 loss: 0.012734036892652512
47 acc 0.2917
48 0 loss: 0.04136260971426964
48 100 loss: 0.06168295815587044
48 200 loss: 0.07007324695587158
48 300 loss: 0.026165083050727844
48 400 loss: 0.13485026359558105
48 500 loss: 0.013557921163737774
48 600 loss: 0.06568337231874466
48 700 loss: 0.09925716370344162
48 acc 0.2899
49 0 loss: 0.08997322618961334
49 100 loss: 0.05200452357530594
49 200 loss: 0.010146246291697025
49 300 loss: 0.12437072396278381
49 400 loss: 0.041757695376873016
49 500 loss: 0.27597522735595703
49 600 loss: 0.010989501141011715
49 700 loss: 0.029163237661123276
49 acc 0.2956

Process finished with exit code 0
