CNN classification of the fashion_mnist dataset (TensorFlow)

1. Check the TensorFlow version

import tensorflow as tf

print('Tensorflow Version:{}'.format(tf.__version__))
print(tf.config.list_physical_devices())

(Output: TensorFlow version and the list of detected physical devices)

2. Load and preprocess the fashion_mnist data

import numpy as np
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# print(train_images.shape) # (60000, 28, 28)
# print(train_labels.shape) # (60000,)
# print(test_images.shape)  # (10000, 28, 28)
# print(test_labels.shape)  # (10000,)
train_images = np.expand_dims(train_images, -1)  # add a channel dimension
# print(train_images.shape) # (num_samples, height, width, channels) = (60000, 28, 28, 1)
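The test images are left as-is above; if they are later used for evaluation or prediction, they need the same channel dimension. Scaling pixel values to [0, 1] is another common preprocessing step that the code above does not apply. A minimal sketch (the normalization lines are optional and not part of the original code):

test_images = np.expand_dims(test_images, -1)   # (10000, 28, 28, 1)

# Optional: scale pixel values from [0, 255] to [0, 1]
# train_images = train_images.astype('float32') / 255.0
# test_images = test_images.astype('float32') / 255.0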

3. Build the CNN model

from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.layers import Conv2D, MaxPool2D, GlobalAvgPool2D

model = tf.keras.Sequential()
model.add(Input(shape=(28,28,1)))  # train_images.shape[1:]
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same'))  # more filters increase the model's capacity to fit the data
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same'))
model.add(MaxPool2D())   # defaults to 2x2; pooling enlarges the receptive field
model.add(Dropout(0.2))  # helps prevent overfitting
model.add(Conv2D(filters=128, kernel_size=(3,3), activation='relu', padding='same'))
model.add(Conv2D(filters=128, kernel_size=(3,3), activation='relu', padding='same'))
model.add(MaxPool2D())   # defaults to 2x2
model.add(Dropout(0.2))  # helps prevent overfitting
model.add(Conv2D(filters=256, kernel_size=(3,3), activation='relu'))
model.add(GlobalAvgPool2D())  # global average pooling (one value per channel)
model.add(Dense(10, activation='softmax'))
model.summary()

(Output: model.summary() showing layer output shapes and parameter counts)
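As a quick sanity check (not in the original post), the untrained model can be called on a dummy batch to confirm that a 28×28×1 input is mapped to a 10-class probability vector:

dummy = np.zeros((1, 28, 28, 1), dtype=np.float32)
print(model(dummy).shape)  # expected: (1, 10)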

4. Compile and train the model

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
              
H = model.fit(x=train_images,
              y=train_labels,
              validation_split=0.2,
              # validation_data=(X_test,y_test),
              epochs=10,
              batch_size=64,
              verbose=1)

(Output: training log for the 10 epochs)
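The test split loaded in step 2 is never used during training. A minimal evaluation and prediction sketch, assuming test_images has been given a channel dimension so its shape is (10000, 28, 28, 1), as in the sketch after step 2:

test_loss, test_acc = model.evaluate(test_images, test_labels, batch_size=64, verbose=0)
print('test_acc: {:.4f}'.format(test_acc))

# Fashion-MNIST class names, in label order (0-9)
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
pred = model.predict(test_images[:1])
print('predicted:', class_names[np.argmax(pred[0])], '| true:', class_names[test_labels[0]])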

5. Analyze the loss and accuracy curves

Use the loss and accuracy curves to judge whether the model is overfitting or underfitting, and keep adjusting the network structure until the model performs best.

import matplotlib.pyplot as plt
fig = plt.gcf()
fig.set_size_inches(12,4)
plt.subplot(1,2,1)
plt.plot(H.epoch, H.history['loss'], label='loss')
plt.plot(H.epoch, H.history['val_loss'], label='val_loss')
plt.legend()
plt.title('loss')

plt.subplot(1,2,2)
plt.plot(H.epoch, H.history['acc'], label='acc')
plt.plot(H.epoch, H.history['val_acc'], label='val_acc')
plt.legend()
plt.title('acc')
plt.show()

(Figure: training/validation loss curves on the left, accuracy curves on the right)
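One common way to act on these curves automatically (not part of the original post) is an EarlyStopping callback, which stops training once the validation loss stops improving and restores the best weights. A minimal sketch reusing the compile settings and data from step 4:

early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              patience=3,
                                              restore_best_weights=True)
H = model.fit(x=train_images,
              y=train_labels,
              validation_split=0.2,
              epochs=30,      # an upper bound; training may stop earlier
              batch_size=64,
              callbacks=[early_stop],
              verbose=1)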

