使用tensorflow2.0做fashion_mnist分类

tensorflow2.0版本比tensorflow1.x版本更加灵活方便了许多,所以本次用tensorflow2.0搭建卷积神经网络做了fashion_mnist数据集上的分类,作为入门训练。只使用了两层卷积神经网络,但是精确率也可以达到90%以上。

导入相应的模块,只需要tensorflow模块即可

import tensorflow as tf

对数据集做一个简单的预处理

def preprocess(x, y):
    """Scale image pixels to [0, 1] float32 and cast labels to int32."""
    return tf.cast(x, tf.float32) / 255.0, tf.cast(y, tf.int32)

对数据集进行加载并进行预处理

# Load Fashion-MNIST and build batched tf.data input pipelines.
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.fashion_mnist.load_data()

# Training pipeline: normalize, shuffle within a 1000-element buffer, batch by 32.
train_db = (tf.data.Dataset.from_tensor_slices((train_x, train_y))
            .map(preprocess)
            .shuffle(1000)
            .batch(32))
# Test pipeline: normalize and batch only — no shuffling needed for evaluation.
test_db = (tf.data.Dataset.from_tensor_slices((test_x, test_y))
           .map(preprocess)
           .batch(32))

搭建网络结构

class Mymodel(tf.keras.Model):
    """Small CNN classifier for 28x28x1 images.

    Two conv/max-pool blocks (32 and 64 filters) followed by two dense
    layers; the final layer emits softmax probabilities over 10 classes.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu')
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)
        self.conv2 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)
        self.flatten = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(64, activation='relu')
        self.fc2 = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, inputs):
        # Apply the layers strictly in sequence.
        out = inputs
        for layer in (self.conv1, self.pool1, self.conv2, self.pool2,
                      self.flatten, self.fc1, self.fc2):
            out = layer(out)
        return out

建立网络模型

# Instantiate the model, build it for 28x28 single-channel inputs so that
# summary() can report layer shapes and parameter counts, then create the
# Adam optimizer used for training.
model = Mymodel()
model.build(input_shape=(None, 28, 28, 1))
model.summary()

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

对网络进行训练,输出训练过程中的损失值和精确度

# Train for 10 epochs; after each epoch report the mean per-sample training
# loss and the accuracy on the held-out test set.
for epoch in range(10):
    train_loss = 0.0
    train_num = 0
    for x, y in train_db:
        # Add the channel dimension expected by Conv2D: (batch, 28, 28, 1).
        x = tf.reshape(x, [-1, 28, 28, 1])
        with tf.GradientTape() as tape:
            pred = model(x)
            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=pred)
            loss = tf.reduce_mean(loss)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # BUG FIX: weight each batch's *mean* loss by its batch size so the
        # division below yields a true per-sample mean. The original summed
        # batch means and divided by the sample count, understating the
        # reported loss by roughly a factor of the batch size.
        train_loss += float(loss) * x.shape[0]
        train_num += x.shape[0]
    loss = train_loss / train_num

    # Evaluate classification accuracy on the test set.
    total_correct = 0
    total_num = 0
    for x, y in test_db:
        x = tf.reshape(x, [-1, 28, 28, 1])
        pred = model(x)
        # Reduce softmax probabilities to a predicted class index per sample.
        pred = tf.argmax(pred, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)
        correct = tf.equal(pred, y)
        correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))
        total_correct += correct
        total_num += x.shape[0]
    accuracy = float(total_correct / total_num)
    print(epoch, 'loss:', loss, 'accuracy:', accuracy)

进行预测

print('.....................预测.............................')

# Take the first batch of test images and labels.
img, label = next(iter(test_db))

# Run the model on the batch (adding the channel axis) and reduce the
# softmax probabilities to int32 class predictions.
logits = model(tf.reshape(img, [-1, 28, 28, 1]))
logits = tf.cast(tf.argmax(logits, axis=1), dtype=tf.int32)

print('logits:', logits)
print('label:', label)

print('预测值和标签是否相等呢?', tf.equal(logits, label))

以下是完整的代码部分

import tensorflow as tf

def preprocess(x, y):
    """Scale image pixels to [0, 1] float32 and cast labels to int32."""
    return tf.cast(x, tf.float32) / 255.0, tf.cast(y, tf.int32)

# Load Fashion-MNIST and build batched tf.data input pipelines.
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.fashion_mnist.load_data()

# Training pipeline: normalize, shuffle within a 1000-element buffer, batch by 32.
train_db = (tf.data.Dataset.from_tensor_slices((train_x, train_y))
            .map(preprocess)
            .shuffle(1000)
            .batch(32))
# Test pipeline: normalize and batch only — no shuffling needed for evaluation.
test_db = (tf.data.Dataset.from_tensor_slices((test_x, test_y))
           .map(preprocess)
           .batch(32))

class Mymodel(tf.keras.Model):
    """Small CNN classifier for 28x28x1 images.

    Two conv/max-pool blocks (32 and 64 filters) followed by two dense
    layers; the final layer emits softmax probabilities over 10 classes.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu')
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)
        self.conv2 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)
        self.flatten = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(64, activation='relu')
        self.fc2 = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, inputs):
        # Apply the layers strictly in sequence.
        out = inputs
        for layer in (self.conv1, self.pool1, self.conv2, self.pool2,
                      self.flatten, self.fc1, self.fc2):
            out = layer(out)
        return out

# Instantiate the model, build it for 28x28 single-channel inputs so that
# summary() can report layer shapes and parameter counts, then create the
# Adam optimizer used for training.
model = Mymodel()
model.build(input_shape=(None, 28, 28, 1))
model.summary()

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

# Train for 10 epochs; after each epoch report the mean per-sample training
# loss and the accuracy on the held-out test set.
for epoch in range(10):
    train_loss = 0.0
    train_num = 0
    for x, y in train_db:
        # Add the channel dimension expected by Conv2D: (batch, 28, 28, 1).
        x = tf.reshape(x, [-1, 28, 28, 1])
        with tf.GradientTape() as tape:
            pred = model(x)
            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=pred)
            loss = tf.reduce_mean(loss)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # BUG FIX: weight each batch's *mean* loss by its batch size so the
        # division below yields a true per-sample mean. The original summed
        # batch means and divided by the sample count, understating the
        # reported loss by roughly a factor of the batch size.
        train_loss += float(loss) * x.shape[0]
        train_num += x.shape[0]
    loss = train_loss / train_num

    # Evaluate classification accuracy on the test set.
    total_correct = 0
    total_num = 0
    for x, y in test_db:
        x = tf.reshape(x, [-1, 28, 28, 1])
        pred = model(x)
        # Reduce softmax probabilities to a predicted class index per sample.
        pred = tf.argmax(pred, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)
        correct = tf.equal(pred, y)
        correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))
        total_correct += correct
        total_num += x.shape[0]
    accuracy = float(total_correct / total_num)
    print(epoch, 'loss:', loss, 'accuracy:', accuracy)

print('.....................预测.............................')

# Take the first batch of test images and labels.
img, label = next(iter(test_db))

# Run the model on the batch (adding the channel axis) and reduce the
# softmax probabilities to int32 class predictions.
logits = model(tf.reshape(img, [-1, 28, 28, 1]))
logits = tf.cast(tf.argmax(logits, axis=1), dtype=tf.int32)

print('logits:', logits)
print('label:', label)

print('预测值和标签是否相等呢?', tf.equal(logits, label))

 

  • 2
    点赞
  • 8
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

杨小嗨yang

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值