Tensorflow(二十八) —— 卷积神经网络(CNN)

1. layers.Conv2D

# ************************* layers.Conv2D
# Demo: how stride and padding affect keras.layers.Conv2D output shapes.
x = tf.random.normal([1, 32, 32, 3])

# 4 = number of kernels N (output channels); kernel_size 5 means a 5x5 kernel;
# strides is the step size; padding "valid" = no padding,
# "same" = pad so that (at stride 1) output spatial size equals input size.
layer = keras.layers.Conv2D(4, kernel_size=5, strides=1, padding="valid")
out = layer(x)
print(out.shape)  # (1, 28, 28, 4) — "valid" shrinks 32 -> 28

layer = keras.layers.Conv2D(4, kernel_size=5, strides=1, padding="same")
out = layer(x)
print(out.shape)  # (1, 32, 32, 4) — "same" keeps 32x32

layer = keras.layers.Conv2D(4, kernel_size=5, strides=2, padding="same")
out = layer(x)
print(out.shape)  # (1, 16, 16, 4) — stride 2 halves the spatial size

2. weight & bias

# **************************** weight & bias
# A Conv2D layer owns a kernel tensor [kh, kw, c_in, c_out] and a bias [c_out].
layer = keras.layers.Conv2D(4, kernel_size=5, strides=2, padding="same")
out = layer(x)  # calling the layer once builds its variables
print("weight:", layer.kernel.shape)  # (5, 5, 3, 4)
print("bias:", layer.bias.shape)      # (4,)

3. nn.conv2d (需要自己维护)

# ***************************** nn.conv2d (weights managed by hand)
# w: [5, 5, 3, 4] = [kernel_h, kernel_w, in_channels, out_channels];
# with tf.nn.conv2d you create and track the parameters yourself.
w = tf.random.uniform([5, 5, 3, 4])
b = tf.zeros([4])  # one bias per output channel
x = tf.random.normal([1, 32, 32, 3])

out = tf.nn.conv2d(x, w, strides=2, padding="SAME")
out += b  # bias broadcasts over batch and spatial dimensions
print(out.shape)  # (1, 16, 16, 4)

4 池化与采样

# MaxPool & AvgPool
x = tf.random.normal([1, 14, 14, 4])

# 2x2 window, stride 2 -> spatial size is halved (floor division)
pool = keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
out1 = pool(x)
print("out1:", out1.shape)  # (1, 7, 7, 4)

# 3x3 window, stride 2, default "valid" padding
pool = keras.layers.MaxPool2D(3, strides=2)
out2 = pool(x)
print("out2:", out2.shape)  # (1, 6, 6, 4)

# functional form of the same pooling op
out3 = tf.nn.max_pool2d(x, ksize=[2, 2], strides=2, padding="SAME")
print("out3:", out3.shape)  # (1, 7, 7, 4)

# upsample
x = tf.random.uniform([1, 14, 14, 4])

# Repeat each pixel 3x along height and width: 14x14 -> 42x42.
layer = keras.layers.UpSampling2D(size=[3, 3])
out = layer(x)
print(out.shape)  # (1, 42, 42, 4)

# ReLU — the functional op and the layer form produce identical results
x = tf.random.normal([2, 3])

relu_fn_out = tf.nn.relu(x)
print("out1:", relu_fn_out.numpy())

relu_layer_out = keras.layers.ReLU()(x)
print("out2:", relu_layer_out.numpy())

5 CIFAR100实战(13层网络结构)


# imports
import tensorflow as tf
from tensorflow.keras import optimizers, layers, Sequential, datasets

# Load CIFAR-100 and carve a 40k/10k train/validation split
# off the 50k training images.
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
x_train, x_val = tf.split(x, axis=0, num_or_size_splits=[40000, 10000])
y_train, y_val = tf.split(y, axis=0, num_or_size_splits=[40000, 10000])

# Labels arrive as [n, 1]; squeeze to rank-1 [n] for one_hot/equal later.
y_test = tf.squeeze(y_test, axis=1)
y_val = tf.squeeze(y_val, axis=1)
y_train = tf.squeeze(y_train, axis=1)
print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
print(y_val.shape)
# Data preprocessing: scale images to [0, 1] floats, cast labels to int32.
def preprocess(x, y):
    x = tf.cast(x, tf.float32) / 255.
    y = tf.cast(y, tf.int32)
    return x, y

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))

# Only the training set needs shuffling. Shuffling val/test costs a
# 10k-element buffer and makes evaluation order nondeterministic while
# having no effect on the metrics, so it is dropped here.
db_train = db_train.shuffle(10000).map(preprocess).batch(128)
db_test = db_test.map(preprocess).batch(128)
db_val = db_val.map(preprocess).batch(128)

sample = next(iter(db_train))
print(sample[0].shape)  # (128, 32, 32, 3)

# Build the convolutional feature extractor: 10 conv layers in 5 VGG-style
# units (two 3x3 "same" convs + one 2x2 max-pool each). Together with the
# 3 dense layers below this forms the 13-layer network.
conv_net = Sequential([ # unit 1: [n,32,32,3] => [n,32,32,64]
                        layers.Conv2D(64,kernel_size = [3,3],strides = [1,1],padding = "same" ,activation = tf.nn.relu),
                        layers.Conv2D(64,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        # pool: [n,32,32,64] => [n,16,16,64]
                        layers.MaxPool2D(pool_size = [2,2],strides = 2,padding = "same"),
                        # unit 2 (convs + pool): [n,16,16,64] => [n,8,8,128]
                        layers.Conv2D(128,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.Conv2D(128,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.MaxPool2D(pool_size = [2,2],strides = 2,padding = "same"),
                        # unit 3 (convs + pool): [n,8,8,128] => [n,4,4,256]
                        layers.Conv2D(256,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.Conv2D(256,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.MaxPool2D(pool_size = [2,2],strides = 2,padding = "same"),
                        # unit 4 (convs + pool): [n,4,4,256] => [n,2,2,512]
                        layers.Conv2D(512,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.Conv2D(512,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.MaxPool2D(pool_size = [2,2],strides = 2,padding = "same"),
                        # unit 5 (convs + pool): [n,2,2,512] => [n,1,1,512]
                        layers.Conv2D(512,kernel_size = [3,3],strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.Conv2D(512,kernel_size = [3,3], strides = [1,1],padding = "same",activation = tf.nn.relu),
                        layers.MaxPool2D(pool_size = [2,2],strides = 2)   
])

conv_net.build(input_shape = [None,32,32,3])

# Classifier head: three fully-connected layers mapping the 512-d feature
# vector to 100 class logits (no softmax here — it is fused into the loss).
fc_net = Sequential([
    layers.Dense(256, activation=tf.nn.relu),
    layers.Dense(128, activation=tf.nn.relu),
    layers.Dense(100),  # raw logits
])
fc_net.build(input_shape=[None, 512])

# Training loop: end-to-end Adam training; every 100 steps report the
# current loss and the accuracy over the validation set.
variables = conv_net.trainable_variables + fc_net.trainable_variables
# `learning_rate` is the supported keyword; `lr` is a deprecated alias.
optimizer = optimizers.Adam(learning_rate = 1e-4)
for epoch in range(10):
    for step,(x,y) in enumerate(db_train):
        with tf.GradientTape() as tape:
            # [n,32,32,3] => [n,1,1,512]
            conv_out = conv_net(x)

            # [n,1,1,512] => [n,512]
            fc_input = tf.reshape(conv_out,[-1,512])

            # [n,512] => [n,100]
            logits = fc_net(fc_input)

            y_onehot = tf.one_hot(y,depth=100)

            # from_logits=True fuses softmax into the loss for numerical stability
            loss = tf.losses.categorical_crossentropy(y_onehot,logits,from_logits=True)
            loss = tf.reduce_mean(loss)

        grads = tape.gradient(loss,variables)
        optimizer.apply_gradients(zip(grads,variables))

        # periodic validation
        if step%100 == 0:
            print("*"*20,"\n","epoch:",epoch,"\n","loss:",float(loss))
            num_total,num_correct = 0,0
            # Loop variables renamed (xb/yb): the original `x_val, y_val`
            # clobbered the module-level split tensors defined above.
            for xb,yb in db_val:
                conv_out = conv_net(xb)
                fc_input = tf.reshape(conv_out,[-1,512])
                logits = fc_net(fc_input)
                prob = tf.nn.softmax(logits,axis = 1)
                pred = tf.argmax(prob,axis = 1)
                pred = tf.cast(pred,dtype=tf.int32)
                correct = tf.reduce_sum(tf.cast(tf.equal(yb,pred),dtype = tf.int32))
                num_correct += correct
                num_total += xb.shape[0]
            ACC = num_correct/num_total
            print("ACC for val:",ACC)


本文为参考龙龙老师的“深度学习与TensorFlow 2入门实战“课程书写的学习笔记

by CyrusMay 2022 04 18

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值