自编码器识别手写数字MNIST数据集

原理

 实现一

# Implementation 1: fully-connected autoencoder on MNIST with Keras/TensorFlow.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic — invalid syntax in a plain .py file; keep only in notebooks

# Download the MNIST data set. Labels are discarded ("_"): autoencoding is unsupervised.
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()

# Normalize pixels to [0, 1] and flatten each 28x28 image into a 784-vector.
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = X_train.reshape((len(X_train), -1))
X_test = X_test.reshape((len(X_test), -1))
print("x_train.shape", X_train.shape)
print("x_test.shape", X_test.shape)

# Bottleneck (code) dimension: 784 -> 32 -> 784.
encoding_dim = 32
input_img = tf.keras.layers.Input(shape=(784,))

encoded = tf.keras.layers.Dense(encoding_dim, activation='relu')(input_img)
# FIX: the output layer previously used 'relu'; 'sigmoid' keeps reconstructions
# in [0, 1], matching the normalized pixel range of the targets.
decoded = tf.keras.layers.Dense(784, activation='sigmoid')(encoded)

# Full autoencoder, plus an encoder view that shares the same trained weights.
autoencoder = tf.keras.Model(inputs=input_img, outputs=decoded)
encoder = tf.keras.Model(inputs=input_img, outputs=encoded)

# Stand-alone decoder: feed a 32-dim code through the autoencoder's last layer.
encoded_input = tf.keras.layers.Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = tf.keras.Model(inputs=encoded_input, outputs=decoder_layer(encoded_input))  # typo fixed: was "deconder"

autoencoder.compile(optimizer='Adam', loss='mse')
# Inputs double as targets: learn to reconstruct each image through the bottleneck.
autoencoder.fit(X_train, X_train, epochs=50, batch_size=256, shuffle=True,
                validation_data=(X_test, X_test))

encoded_imgs = encoder.predict(X_test)
decoded_imgs = decoder.predict(encoded_imgs)

# Show originals (top row) against their reconstructions (bottom row).
n = 10
for i in range(n):
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(X_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

结果如下:

 实现二

import torch
import torch.nn as nn
import numpy as np
import torch.utils.data as Data
import matplotlib.pyplot as plt

# Load MNIST from a local .npz archive (same arrays as keras' mnist.load_data()).
data = np.load('./mnist.npz')

# Scale pixel values from [0, 255] to [0.0, 1.0].
x_train = data['x_train'] / 255.0
# FIX: x_test was left unnormalized, inconsistent with x_train.
x_test = data['x_test'] / 255.0


class AutoEncoder(nn.Module):
    """Single-hidden-layer autoencoder: 784 -> hidden_size -> 784.

    The forward pass returns both the hidden code and the sigmoid
    reconstruction, so callers can inspect either.
    """

    def __init__(self, hidden_size=200):
        super().__init__()
        self.encoder = nn.Linear(28 * 28, hidden_size)
        self.relu = nn.ReLU()
        self.decoder = nn.Linear(hidden_size, 28 * 28)
        self.sigmoid = nn.Sigmoid()

    def forward(self, X):
        """Flatten input to (N, 784) and return (code, reconstruction)."""
        flat = X.reshape(-1, 28 * 28)
        code = self.relu(self.encoder(flat))
        reconstruction = self.sigmoid(self.decoder(code))
        return code, reconstruction

class MyDataset(torch.utils.data.Dataset):
    """Self-supervised dataset: each sample's target is the sample itself."""

    def __init__(self, X):
        self.X = X
        self.Y = X  # reconstruction target is the input

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        """Return (input, target) as float32 tensors for sample *index*."""
        sample = torch.tensor(self.X[index], dtype=torch.float32)
        target = torch.tensor(self.Y[index], dtype=torch.float32)
        return sample, target


# Train the autoencoder for 10 epochs, halving the learning rate after each one.
train = MyDataset(x_train)
train_loader = Data.DataLoader(dataset=train, batch_size=100, shuffle=True)

autoencoder = AutoEncoder(64)
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.01)
loss_func = nn.MSELoss()
losses = []
for epoch in range(10):
    for step, (batch_x, batch_y) in enumerate(train_loader):
        batch_x = batch_x.view(-1, 28 * 28)
        batch_y = batch_y.view(-1, 28 * 28)
        _, reconstruction = autoencoder(batch_x)
        loss = loss_func(reconstruction, batch_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Record/print the loss every 100 mini-batches.
        if step % 100 == 0:
            print('loss:', epoch, loss.item())
            losses.append(loss.item())
    # Manual learning-rate decay: halve the rate in every parameter group.
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * 0.5

# Plot the recorded loss curve.
l = np.array(losses)
plt.plot(np.squeeze(l))
plt.show()

结果如下:

 

  • 0
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值