[Artificial Intelligence] Lab 2: Image Classification with a CNN

The experiment consists of four main steps:

  1. load datasets
  2. build network
  3. train
  4. test

The network to build is VGG13, which has 13 layers in total.
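
Where does the 13 come from? VGG counts only the weighted layers: each of the 5 conv units below has 2 Conv2D layers, and the classifier head adds 3 Dense layers; the MaxPool2D layers carry no weights. A tiny sanity check of the count (a sketch; the numbers match the code in this post):

n_conv = 5 * 2   # 5 units x 2 Conv2D layers each
n_dense = 3      # fully connected head: 256 -> 128 -> 100
assert n_conv + n_dense == 13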

Below is the skeleton code our teacher gave us; we need to fill in the underlined blanks ourselves.

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os

os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
tf.random.set_seed(2345)

conv_layers = [ # 5 units of conv + max pooling
    # Please build the VGG13 network according to the demonstration in the README file; below is an example for unit 1
    # Please complete the other parts (unit 2 to unit 5)
    # unit 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    #____________________________________________________________________________
    #____________________________________________________________________________
    #____________________________________________________________________________

    # unit 3
    #____________________________________________________________________________
    #____________________________________________________________________________
    #____________________________________________________________________________

    # unit 4
    #____________________________________________________________________________
    #____________________________________________________________________________
    #____________________________________________________________________________

    # unit 5
    #____________________________________________________________________________
    #____________________________________________________________________________
    #____________________________________________________________________________

]



def preprocess(x, y):
    # scale pixel values to [0, 1]
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x,y


(x,y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)


train_db = tf.data.Dataset.from_tensor_slices((x,y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test))
test_db = test_db.map(preprocess).batch(64)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():

    # [b, 32, 32, 3] => [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)
# Please add your code in the blanks
    fc_net = Sequential([
        layers.Dense(____________________________),
        layers.Dense(____________________________),
        layers.Dense(100, activation=None),  # you can try other activation functions to evaluate and compare
    ])
# Please add your code in the blanks
    conv_net.build(input_shape=[None, ____, _____, ___])
    fc_net.build(input_shape=[None, ____])
    optimizer = optimizers.Adam(lr=1e-4)

    # [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = conv_net.trainable_variables + fc_net.trainable_variables

    for epoch in range(50):

        for step, (x,y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # flatten, => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
# Please add your code in the blanks
            grads = tape.gradient(___________, ______________)
            optimizer.apply_gradients(zip(__________, ___________))

            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))



        total_num = 0
        total_correct = 0
        for x,y in test_db:

            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)



if __name__ == '__main__':
    main()

Here is my completed code:

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)

# The Sequential container accepts a list of layers. We assemble the conv list first: part one
# of the network (its 10 Conv2D layers account for 10 of VGG13's 13 weighted layers).
conv_layers = [  # 5 units of conv + max pooling
    # unit 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 3
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 4
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 5
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

]


# Data preprocessing: just a type cast plus scaling pixel values into [0, 1].
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y


# load datasets: the assignment calls for CIFAR-100
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y)  # or tf.squeeze(y, axis=1): squeeze out the size-1 label dimension
y_test = tf.squeeze(y_test)  # same for the test labels
print(x.shape, y.shape, x_test.shape, y_test.shape)
# training set
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)
# test set
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(128)

# Inspect the shape of one sample batch.
sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))  # values lie in [0, 1]


def main():
    # Put the conv layers into a Sequential container.
    # input: [b, 32, 32, 3] => output: [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)
    # conv_net.build(input_shape=[None, 32, 32, 3])
    # x = tf.random.normal([4, 32, 32, 3])
    # out = conv_net(x)
    # print(out.shape)

    # Build the fully connected network: part two of the model, whose input is part one's output.
    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None),
    ])

    # The single network is effectively written as two pieces here.
    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    # create an optimizer
    optimizer = optimizers.Adam(lr=1e-4)  # note: newer Keras deprecates 'lr' in favor of 'learning_rate' (see the warning in the Colab log below)
    conv_net.summary()
    fc_net.summary()

    # The + below is Python list concatenation: two lists are joined into one.
    # e.g. [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = conv_net.trainable_variables + fc_net.trainable_variables
    for epoch in range(50):

        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # then squeeze or reshape to flatten: => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # feed into the fully connected network to get the output logits
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100], convert labels to one-hot encoding
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss; the result has shape [b]
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            # compute the gradients
            grads = tape.gradient(loss, variables)
            # apply the gradient update
            optimizer.apply_gradients(zip(grads, variables))

            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))

        # evaluate on the test set
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            # predicted class probabilities
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1) 
            pred = tf.cast(pred, dtype=tf.int32)

            # compare the predictions pred against the ground-truth labels
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)  # convert the tensor to a Python int

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)
        # Colab source code: https://github.com/jupyter/colaboratory


if __name__ == '__main__':
    main()
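
A side note on the loss: with 100 classes and essentially random logits at initialization, the expected cross-entropy is about ln(100) ≈ 4.605, which is exactly where the first loss value in the logs below starts. A tiny standalone sketch of the one_hot + from_logits pattern used above (assuming only TensorFlow 2.x is installed):

import tensorflow as tf

y = tf.constant([3])                  # a single label in [0, 100)
y_onehot = tf.one_hot(y, depth=100)   # [1] => [1, 100]
logits = tf.random.normal([1, 100])   # unnormalized scores, like fc_net's output
loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
print(float(tf.reduce_mean(loss)))    # roughly ln(100) ≈ 4.6 for random logits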

Here are the results from running on my own machine. I started it at 10 a.m., and ten hours later it was still only about 7/10 of the way through. So slow. (It did eventually finish; the full log is below.)

Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 32, 32, 64)        1792      
                                                                 
 conv2d_1 (Conv2D)           (None, 32, 32, 64)        36928     
                                                                 
 max_pooling2d (MaxPooling2D  (None, 16, 16, 64)       0         
 )                                                               
                                                                 
 conv2d_2 (Conv2D)           (None, 16, 16, 128)       73856     
                                                                 
 conv2d_3 (Conv2D)           (None, 16, 16, 128)       147584    
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 8, 8, 128)        0         
 2D)                                                             
                                                                 
 conv2d_4 (Conv2D)           (None, 8, 8, 256)         295168    
                                                                 
 conv2d_5 (Conv2D)           (None, 8, 8, 256)         590080    
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 4, 4, 256)        0         
 2D)                                                             
                                                                 
 conv2d_6 (Conv2D)           (None, 4, 4, 512)         1180160   
                                                                 
 conv2d_7 (Conv2D)           (None, 4, 4, 512)         2359808   
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 2, 2, 512)        0         
 2D)                                                             
                                                                 
 conv2d_8 (Conv2D)           (None, 2, 2, 512)         2359808   
                                                                 
 conv2d_9 (Conv2D)           (None, 2, 2, 512)         2359808   
                                                                 
 max_pooling2d_4 (MaxPooling  (None, 1, 1, 512)        0         
 2D)                                                             
                                                                 
=================================================================
Total params: 9,404,992
Trainable params: 9,404,992
Non-trainable params: 0
_________________________________________________________________
Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 dense (Dense)               (None, 256)               131328    
                                                                 
 dense_1 (Dense)             (None, 128)               32896     
                                                                 
 dense_2 (Dense)             (None, 100)               12900     
                                                                 
=================================================================
Total params: 177,124
Trainable params: 177,124
Non-trainable params: 0
_________________________________________________________________
0 0 loss: 4.605703830718994
0 100 loss: 4.573154449462891
0 200 loss: 4.274768829345703
0 300 loss: 4.280937194824219
0 acc: 0.0626
1 0 loss: 4.13004207611084
1 100 loss: 4.003829002380371
1 200 loss: 3.8823888301849365
1 300 loss: 4.033702373504639
1 acc: 0.0968
2 0 loss: 3.9909071922302246
2 100 loss: 3.92575740814209
2 200 loss: 3.734340190887451
2 300 loss: 3.5258753299713135
2 acc: 0.1601
3 0 loss: 3.586297035217285
3 100 loss: 3.733517646789551
3 200 loss: 3.234541654586792
3 300 loss: 3.192173957824707
3 acc: 0.2045
4 0 loss: 3.4989943504333496
4 100 loss: 3.281524658203125
4 200 loss: 3.20217227935791
4 300 loss: 3.265291690826416
4 acc: 0.2305
5 0 loss: 3.2033958435058594
5 100 loss: 2.947575569152832
5 200 loss: 2.8855361938476562
5 300 loss: 2.885098934173584
5 acc: 0.2497
6 0 loss: 2.8828682899475098
6 100 loss: 3.0514018535614014
6 200 loss: 2.7332396507263184
6 300 loss: 3.0623221397399902
6 acc: 0.2771
7 0 loss: 3.0236001014709473
7 100 loss: 2.7354226112365723
7 200 loss: 2.4895710945129395
7 300 loss: 2.585233211517334
7 acc: 0.2853
8 0 loss: 2.621670722961426
8 100 loss: 2.807131767272949
8 200 loss: 2.6461825370788574
8 300 loss: 2.770937204360962
8 acc: 0.3049
9 0 loss: 2.8133463859558105
9 100 loss: 2.390254020690918
9 200 loss: 2.6979241371154785
9 300 loss: 2.122256278991699
9 acc: 0.3216
10 0 loss: 2.4884862899780273
10 100 loss: 2.3237156867980957
10 200 loss: 1.976022481918335
10 300 loss: 1.9411729574203491
10 acc: 0.341
11 0 loss: 2.3988051414489746
11 100 loss: 2.157597541809082
11 200 loss: 1.783387541770935
11 300 loss: 2.2747488021850586
11 acc: 0.3428
12 0 loss: 2.081052780151367
12 100 loss: 1.9125139713287354
12 200 loss: 1.9749314785003662
12 300 loss: 1.9473168849945068
12 acc: 0.3481
13 0 loss: 1.9426753520965576
13 100 loss: 1.6028225421905518
13 200 loss: 1.654457688331604
13 300 loss: 1.5368409156799316
13 acc: 0.348
14 0 loss: 1.9074410200119019
14 100 loss: 1.2604421377182007
14 200 loss: 1.33577299118042
14 300 loss: 1.4747681617736816
14 acc: 0.3456
15 0 loss: 1.408400535583496
15 100 loss: 1.4740254878997803
15 200 loss: 1.0276473760604858
15 300 loss: 1.2776360511779785
15 acc: 0.3468
16 0 loss: 1.1048237085342407
16 100 loss: 1.0582349300384521
16 200 loss: 0.7934116721153259
16 300 loss: 0.9454417824745178
16 acc: 0.3392
17 0 loss: 1.0203744173049927
17 100 loss: 0.5127657651901245
17 200 loss: 0.6567991971969604
17 300 loss: 0.74583899974823
17 acc: 0.3313
18 0 loss: 0.6991515159606934
18 100 loss: 0.49814605712890625
18 200 loss: 0.5801453590393066
18 300 loss: 0.5829309225082397
18 acc: 0.3317
19 0 loss: 0.4286673367023468
19 100 loss: 0.5325354337692261
19 200 loss: 0.3180204927921295
19 300 loss: 0.48299315571784973
19 acc: 0.3328
20 0 loss: 0.5821853280067444
20 100 loss: 0.24863868951797485
20 200 loss: 0.25470632314682007
20 300 loss: 0.23631633818149567
20 acc: 0.3273
21 0 loss: 0.328163743019104
21 100 loss: 0.19887247681617737
21 200 loss: 0.20321449637413025
21 300 loss: 0.37942832708358765
21 acc: 0.3366
22 0 loss: 0.3218492865562439
22 100 loss: 0.3823566734790802
22 200 loss: 0.2301541566848755
22 300 loss: 0.23679348826408386
22 acc: 0.3281
23 0 loss: 0.4278489351272583
23 100 loss: 0.17314794659614563
23 200 loss: 0.19846001267433167
23 300 loss: 0.15466056764125824
23 acc: 0.3397
24 0 loss: 0.3409762978553772
24 100 loss: 0.19322939217090607
24 200 loss: 0.2183304876089096
24 300 loss: 0.10360629856586456
24 acc: 0.3304
25 0 loss: 0.184882253408432
25 100 loss: 0.15770471096038818
25 200 loss: 0.11995077133178711
25 300 loss: 0.17878545820713043
25 acc: 0.3261
26 0 loss: 0.14563722908496857
26 100 loss: 0.10653193295001984
26 200 loss: 0.06353025138378143
26 300 loss: 0.10915520042181015
26 acc: 0.3338
27 0 loss: 0.159509539604187
27 100 loss: 0.049696896225214005
27 200 loss: 0.05399111658334732
27 300 loss: 0.13322849571704865
27 acc: 0.3307
28 0 loss: 0.36089855432510376
28 100 loss: 0.08262927830219269
28 200 loss: 0.18748503923416138
28 300 loss: 0.10662227123975754
28 acc: 0.3373
29 0 loss: 0.12509845197200775
29 100 loss: 0.08384230732917786
29 200 loss: 0.10343224555253983
29 300 loss: 0.13146759569644928
29 acc: 0.3421
30 0 loss: 0.16149979829788208
30 100 loss: 0.09493231028318405
30 200 loss: 0.11518850922584534
30 300 loss: 0.2519652545452118
30 acc: 0.3253
31 0 loss: 0.16040371358394623
31 100 loss: 0.08821642398834229
31 200 loss: 0.11975134164094925
31 300 loss: 0.2427465319633484
31 acc: 0.3342
32 0 loss: 0.11135876178741455
32 100 loss: 0.11297046393156052
32 200 loss: 0.11937586963176727
32 300 loss: 0.04514150321483612
32 acc: 0.3312
33 0 loss: 0.10594180226325989
33 100 loss: 0.15190201997756958
33 200 loss: 0.0702848881483078
33 300 loss: 0.03502190113067627
33 acc: 0.3291
34 0 loss: 0.15665759146213531
34 100 loss: 0.1422211229801178
34 200 loss: 0.08091887831687927
34 300 loss: 0.1349743902683258
34 acc: 0.3378
35 0 loss: 0.11525900661945343
35 100 loss: 0.06358155608177185
35 200 loss: 0.1529129147529602
35 300 loss: 0.22628331184387207
35 acc: 0.3382
36 0 loss: 0.15683573484420776
36 100 loss: 0.13646073639392853
36 200 loss: 0.06633880734443665
36 300 loss: 0.09798240661621094
36 acc: 0.341
37 0 loss: 0.12612700462341309
37 100 loss: 0.04630628228187561
37 200 loss: 0.18259760737419128
37 300 loss: 0.10138311982154846
37 acc: 0.3371
38 0 loss: 0.09454572945833206
38 100 loss: 0.0678207203745842
38 200 loss: 0.04612647742033005
38 300 loss: 0.041865911334753036
38 acc: 0.329
39 0 loss: 0.0841379314661026
39 100 loss: 0.05027864873409271
39 200 loss: 0.08458943665027618
39 300 loss: 0.06776276975870132
39 acc: 0.3399
40 0 loss: 0.14195799827575684
40 100 loss: 0.12060148268938065
40 200 loss: 0.13087618350982666
40 300 loss: 0.07757624983787537
40 acc: 0.3398
41 0 loss: 0.04287588596343994
41 100 loss: 0.10122890770435333
41 200 loss: 0.056345585733652115
41 300 loss: 0.04591142758727074
41 acc: 0.3436
42 0 loss: 0.08447226136922836
42 100 loss: 0.22661612927913666
42 200 loss: 0.1079418882727623
42 300 loss: 0.06236565485596657
42 acc: 0.3395
43 0 loss: 0.10079903900623322
43 100 loss: 0.10626668483018875
43 200 loss: 0.14811712503433228
43 300 loss: 0.10150552541017532
43 acc: 0.3464
44 0 loss: 0.10644478350877762
44 100 loss: 0.06013133376836777
44 200 loss: 0.06476859003305435
44 300 loss: 0.11906753480434418
44 acc: 0.3399
45 0 loss: 0.17771360278129578
45 100 loss: 0.05107204243540764
45 200 loss: 0.1423441469669342
45 300 loss: 0.02668618969619274
45 acc: 0.3408
46 0 loss: 0.17410549521446228
46 100 loss: 0.08211968839168549
46 200 loss: 0.04328247159719467
46 300 loss: 0.05685717985033989
46 acc: 0.3496
47 0 loss: 0.042486295104026794
47 100 loss: 0.1727692037820816
47 200 loss: 0.09772153943777084
47 300 loss: 0.034717023372650146
47 acc: 0.3411
48 0 loss: 0.18558627367019653
48 100 loss: 0.06087672710418701
48 200 loss: 0.04182719439268112
48 300 loss: 0.06226355582475662
48 acc: 0.3339
49 0 loss: 0.08151577413082123
49 100 loss: 0.09905587136745453
49 200 loss: 0.09194858372211456
49 300 loss: 0.0321841835975647
49 acc: 0.3387

Process finished with exit code 0

I suggest running it on Colab. I wanted to run it there too, but Chrome wouldn't open the page and I got stuck at step one T_T.
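
Before blaming the machine outright, it is also worth checking whether TensorFlow can see a GPU at all; with a CPU-only install, ten-hour training runs for VGG13 on CIFAR-100 are entirely plausible. A minimal check (a sketch, assuming TensorFlow 2.x):

import tensorflow as tf
# An empty list here means TensorFlow is running on the CPU only.
print(tf.config.list_physical_devices('GPU'))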

Update (December 7): after installing a certain helper tool, I found I could access Colab after all. The same program finished in about 37 minutes there, enormously faster than on my own machine. Below are my Colab results:

Downloading data from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz
169009152/169001437 [==============================] - 3s 0us/step
169017344/169001437 [==============================] - 3s 0us/step
(50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
sample: (128, 32, 32, 3) (128,) tf.Tensor(0.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 32, 32, 64)        1792      
                                                                 
 conv2d_1 (Conv2D)           (None, 32, 32, 64)        36928     
                                                                 
 max_pooling2d (MaxPooling2D  (None, 16, 16, 64)       0         
 )                                                               
                                                                 
 conv2d_2 (Conv2D)           (None, 16, 16, 128)       73856     
                                                                 
 conv2d_3 (Conv2D)           (None, 16, 16, 128)       147584    
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 8, 8, 128)        0         
 2D)                                                             
                                                                 
 conv2d_4 (Conv2D)           (None, 8, 8, 256)         295168    
                                                                 
 conv2d_5 (Conv2D)           (None, 8, 8, 256)         590080    
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 4, 4, 256)        0         
 2D)                                                             
                                                                 
 conv2d_6 (Conv2D)           (None, 4, 4, 512)         1180160   
                                                                 
 conv2d_7 (Conv2D)           (None, 4, 4, 512)         2359808   
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 2, 2, 512)        0         
 2D)                                                             
                                                                 
 conv2d_8 (Conv2D)           (None, 2, 2, 512)         2359808   
                                                                 
 conv2d_9 (Conv2D)           (None, 2, 2, 512)         2359808   
                                                                 
 max_pooling2d_4 (MaxPooling  (None, 1, 1, 512)        0         
 2D)                                                             
                                                                 
=================================================================
Total params: 9,404,992
Trainable params: 9,404,992
Non-trainable params: 0
_________________________________________________________________
Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 dense (Dense)               (None, 256)               131328    
                                                                 
 dense_1 (Dense)             (None, 128)               32896     
                                                                 
 dense_2 (Dense)             (None, 100)               12900     
                                                                 
=================================================================
Total params: 177,124
Trainable params: 177,124
Non-trainable params: 0
_________________________________________________________________
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
  super(Adam, self).__init__(name, **kwargs)
0 0 loss: 4.605703830718994
0 100 loss: 4.579543113708496
0 200 loss: 4.295363426208496
0 300 loss: 4.30647611618042
0 acc: 0.0664
1 0 loss: 4.071621894836426
1 100 loss: 3.9781930446624756
1 200 loss: 3.8503026962280273
1 300 loss: 3.9217281341552734
1 acc: 0.1171
2 0 loss: 3.844515562057495
2 100 loss: 3.8795487880706787
2 200 loss: 3.5778450965881348
2 300 loss: 3.471754312515259
2 acc: 0.1735
3 0 loss: 3.5877599716186523
3 100 loss: 3.5094151496887207
3 200 loss: 3.271711826324463
3 300 loss: 3.1189067363739014
3 acc: 0.2057
4 0 loss: 3.5270471572875977
4 100 loss: 3.260599136352539
4 200 loss: 3.163914203643799
4 300 loss: 3.1675050258636475
4 acc: 0.2258
5 0 loss: 3.282217025756836
5 100 loss: 2.930232524871826
5 200 loss: 2.9272775650024414
5 acc: 0.2513
6 0 loss: 2.9781484603881836
6 100 loss: 3.0117006301879883
6 200 loss: 2.797576904296875
6 300 loss: 3.0792269706726074
6 acc: 0.2693
7 0 loss: 3.0973868370056152
7 100 loss: 2.7252399921417236
7 200 loss: 2.588582992553711
7 300 loss: 2.557039260864258
7 acc: 0.2895
8 0 loss: 2.6511619091033936
8 100 loss: 2.7096669673919678
8 200 loss: 2.586071491241455
8 300 loss: 2.7542171478271484
8 acc: 0.3092
9 0 loss: 2.6784982681274414
9 100 loss: 2.245143413543701
9 200 loss: 2.6391384601593018
9 300 loss: 2.1213998794555664
9 acc: 0.3136
10 0 loss: 2.5744028091430664
10 100 loss: 2.397691249847412
10 200 loss: 2.04136323928833
10 300 loss: 2.032853841781616
10 acc: 0.3292
11 0 loss: 2.248678684234619
11 100 loss: 2.147101879119873
11 200 loss: 1.7349820137023926
11 300 loss: 2.121100425720215
11 acc: 0.3247
12 0 loss: 1.920336365699768
12 100 loss: 1.7461389303207397
12 200 loss: 1.8104661703109741
12 300 loss: 1.698201298713684
12 acc: 0.3321
13 0 loss: 1.660325050354004
13 100 loss: 1.674147367477417
13 200 loss: 1.4182236194610596
13 300 loss: 1.3978238105773926
13 acc: 0.3231
14 0 loss: 1.2530699968338013
14 100 loss: 0.9997458457946777
14 200 loss: 1.309718370437622
14 300 loss: 1.084842324256897
14 acc: 0.3129
15 0 loss: 1.193671703338623
15 100 loss: 0.9685628414154053
15 200 loss: 0.9058113694190979
15 300 loss: 1.01847505569458
15 acc: 0.317
16 0 loss: 0.9028932452201843
16 100 loss: 0.6653304100036621
16 200 loss: 0.46601372957229614
16 300 loss: 0.6217385530471802
16 acc: 0.3106
17 0 loss: 0.727854311466217
17 100 loss: 0.3981296718120575
17 200 loss: 0.5228483080863953
17 300 loss: 0.5998938679695129
17 acc: 0.2991
18 0 loss: 0.5805451273918152
18 100 loss: 0.3940545320510864
18 200 loss: 0.4722597301006317
18 300 loss: 0.45523616671562195
18 acc: 0.2987
19 0 loss: 0.6003919839859009
19 100 loss: 0.2240317463874817
19 200 loss: 0.3627362847328186
19 300 loss: 0.26470911502838135
19 acc: 0.3089
20 0 loss: 0.2560865879058838
20 100 loss: 0.3344946503639221
20 200 loss: 0.31515398621559143
20 300 loss: 0.39510685205459595
20 acc: 0.3026
21 0 loss: 0.4620325565338135
21 100 loss: 0.2081003040075302
21 200 loss: 0.1886335015296936
21 300 loss: 0.26929280161857605
21 acc: 0.3074
22 0 loss: 0.24272006750106812
22 100 loss: 0.22473227977752686
22 200 loss: 0.20395854115486145
22 300 loss: 0.18820872902870178
22 acc: 0.3083
23 0 loss: 0.22682049870491028
23 100 loss: 0.36543434858322144
23 200 loss: 0.07166758924722672
23 300 loss: 0.09640516340732574
23 acc: 0.3075
24 0 loss: 0.1450769156217575
24 100 loss: 0.05213901400566101
24 200 loss: 0.23339170217514038
24 300 loss: 0.07248969376087189
24 acc: 0.3092
25 0 loss: 0.2158544361591339
25 100 loss: 0.12670393288135529
25 200 loss: 0.20489054918289185
25 300 loss: 0.11157195270061493
25 acc: 0.314
26 0 loss: 0.1532250940799713
26 100 loss: 0.0750114768743515
26 200 loss: 0.3784523010253906
26 300 loss: 0.0847141444683075
26 acc: 0.3072
27 0 loss: 0.16163787245750427
27 100 loss: 0.0931471735239029
27 200 loss: 0.09946497529745102
27 300 loss: 0.1250903308391571
27 acc: 0.3134
28 0 loss: 0.13532555103302002
28 100 loss: 0.141266867518425
28 200 loss: 0.19733315706253052
28 300 loss: 0.16875949501991272
28 acc: 0.3121
29 0 loss: 0.1743878275156021
29 100 loss: 0.12369076907634735
29 200 loss: 0.10782884061336517
29 300 loss: 0.060996994376182556
29 acc: 0.3122
30 0 loss: 0.09318936616182327
30 100 loss: 0.1147213727235794
30 200 loss: 0.17644652724266052
30 300 loss: 0.07866644859313965
30 acc: 0.3107
31 0 loss: 0.14084084331989288
31 100 loss: 0.08011481910943985
31 200 loss: 0.07548494637012482
31 300 loss: 0.09042893350124359
31 acc: 0.3139
32 0 loss: 0.16660545766353607
32 100 loss: 0.07169085741043091
32 200 loss: 0.19020332396030426
32 300 loss: 0.11832849681377411
32 acc: 0.3076
33 0 loss: 0.230143740773201
33 100 loss: 0.072193942964077
33 200 loss: 0.0462278351187706
33 300 loss: 0.04750790074467659
33 acc: 0.3125
34 0 loss: 0.09428784996271133
34 100 loss: 0.10934901982545853
34 200 loss: 0.08477897942066193
34 300 loss: 0.07798942178487778
34 acc: 0.3133
35 0 loss: 0.21155236661434174
35 100 loss: 0.21073132753372192
35 200 loss: 0.0553957037627697
35 300 loss: 0.10374538600444794
35 acc: 0.3153
36 0 loss: 0.048893291503190994
36 100 loss: 0.06306926906108856
36 200 loss: 0.10471584647893906
36 300 loss: 0.08443345874547958
36 acc: 0.3165
37 0 loss: 0.06113031506538391
37 100 loss: 0.059913210570812225
37 200 loss: 0.15217798948287964
37 300 loss: 0.10389281809329987
37 acc: 0.3135
38 0 loss: 0.13415582478046417
38 100 loss: 0.13481742143630981
38 200 loss: 0.1045721173286438
38 300 loss: 0.09099530428647995
38 acc: 0.3146
39 0 loss: 0.09938441962003708
39 100 loss: 0.10667737573385239
39 200 loss: 0.10043273866176605
39 300 loss: 0.05317280814051628
39 acc: 0.313
40 0 loss: 0.07995403558015823
40 100 loss: 0.03227170556783676
40 200 loss: 0.03379109501838684
40 300 loss: 0.05735338479280472
40 acc: 0.3145
41 0 loss: 0.0556551069021225
41 100 loss: 0.02025899849832058
41 200 loss: 0.06852033734321594
41 300 loss: 0.09525291621685028
41 acc: 0.3089
42 0 loss: 0.14556632936000824
42 100 loss: 0.13306768238544464
42 200 loss: 0.10822974890470505
42 300 loss: 0.0783647745847702
42 acc: 0.3147
43 0 loss: 0.03061378188431263
43 100 loss: 0.09040059894323349
43 200 loss: 0.16824980080127716
43 300 loss: 0.08020259439945221
43 acc: 0.3084
44 0 loss: 0.13128815591335297
44 100 loss: 0.0176226869225502
44 200 loss: 0.07319459319114685
44 300 loss: 0.06256011128425598
44 acc: 0.319
45 0 loss: 0.10079637169837952
45 100 loss: 0.021738210693001747
45 200 loss: 0.1580265760421753
45 300 loss: 0.12092546373605728
45 acc: 0.3082
46 0 loss: 0.06563448160886765
46 100 loss: 0.1869114339351654
46 200 loss: 0.09348289668560028
46 300 loss: 0.037952713668346405
46 acc: 0.3089
47 0 loss: 0.18213742971420288
47 100 loss: 0.12236323207616806
47 200 loss: 0.05455945432186127
47 300 loss: 0.09804233908653259
47 acc: 0.3178
48 0 loss: 0.06900112330913544
48 100 loss: 0.15671207010746002
48 200 loss: 0.032541245222091675
48 300 loss: 0.1704862266778946
48 acc: 0.3204
49 0 loss: 0.014402791857719421
49 100 loss: 0.03425758704543114
49 200 loss: 0.03888377547264099
49 300 loss: 0.04487434774637222
49 acc: 0.3125

Woo-hoo, all I can say is: Colab rocks!
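
For the record, the ~37-minute run is presumably down to Colab's GPU runtime (enabled via Runtime > Change runtime type). A quick way to confirm what you were assigned (a sketch; run it in a Colab cell):

import tensorflow as tf
# On a GPU runtime this should list one device, e.g. /physical_device:GPU:0
print(tf.config.list_physical_devices('GPU'))
# In a Colab cell, "!nvidia-smi" additionally shows the assigned GPU model.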
