PyTorch source code for fully-connected and convolutional autoencoders

Fully-connected autoencoder

import torch
import torch.nn as nn
import torch.utils.data as Data
from torchvision.datasets import MNIST
from torchvision import transforms as tfs
import matplotlib.pyplot as plt
import numpy as np

# Load the data
train_data = MNIST(root='./mnist/', train=True, transform=tfs.ToTensor(), download=True)  # 60,000 training images
print(train_data.data.size())     # (60000, 28, 28)
print(train_data.targets.size())  # (60000)
plt.imshow(train_data.data[0].numpy())  # show the first image (rendered in false color by matplotlib's default colormap)
plt.show()
train_loader = Data.DataLoader(dataset=train_data, batch_size=64, shuffle=True)  # batch the data and shuffle it

# Define the autoencoder
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()

        self.encoder = nn.Sequential(
            nn.Linear(784, 400),
            nn.ReLU(True),
            nn.Linear(400, 200),
            nn.ReLU(True),
            nn.Linear(200, 100),
            nn.ReLU(True),
            nn.Linear(100,3)
        )

        self.decoder = nn.Sequential(
            nn.Linear(3, 100),
            nn.ReLU(True),
            nn.Linear(100, 200),
            nn.ReLU(True),
            nn.Linear(200, 400),
            nn.ReLU(True),
            nn.Linear(400, 784),
            nn.Tanh()
        )
    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded

autoencoder = Autoencoder()
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.005)    # optimizer
loss_func = nn.MSELoss()                                            # loss function: mean squared error

# Create a figure
f, a = plt.subplots(2, 10, figsize=(10, 2))  # a 2-row x 10-column grid of subplots; figsize is (width, height)
plt.ion()
# In interactive mode, plt.plot(x) / plt.imshow(x) draw immediately without calling plt.show().
# If ion() is turned on in a script and never turned off with ioff(), the final figure flashes and
# disappears instead of staying on screen; to prevent that, call plt.ioff() before the last plt.show().

# Images used to preview the original data
view_data = train_data.data[:10].view(-1, 28*28).float() / 255.
#print(view_data)
for i in range(10):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)))
    a[0][i].set_xticks(())
    a[0][i].set_yticks(())  # hide the axis ticks

# Start training
for epoch in range(10):
    for step, (x, b_label) in enumerate(train_loader):   # enumerate yields both the step index and the batch
        #print(x.shape)           # (64, 1, 28, 28)
        b_x = x.view(-1, 28*28)   # batch x, shape (batch, 28*28)
        #print(b_x.shape)         # (64, 784)
        b_y = x.view(-1, 28*28)   # batch y, shape (batch, 28*28)

        encoded, decoded = autoencoder(b_x)

        loss = loss_func(decoded, b_y)      # compute the loss
        optimizer.zero_grad()               # clear the gradients
        loss.backward()                     # backpropagation
        optimizer.step()                    # update the parameters

        if step % 100 == 0:        # report every 100 steps
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())

            # plot the decoded images
            encoded_data, decoded_data = autoencoder(view_data)
            #print(encoded_data.shape)
            for i in range(10):
                a[1][i].clear()
                a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)))
                a[1][i].set_xticks(()); a[1][i].set_yticks(())
            plt.draw(); plt.pause(0.05)  # pause for 0.05 s

plt.ioff()
plt.show()
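
Because the encoder squeezes each 784-pixel image down to just 3 numbers, a natural follow-up is to plot those 3-D codes and see how the digits cluster. The snippet below is a minimal sketch of that idea, continuing from the script above (it reuses autoencoder and train_data); it is an added illustration, not part of the original post.

# Illustrative sketch (not in the original script): scatter-plot the 3-D latent
# codes produced by the trained encoder, colored by digit label.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the 3-D projection)

latent_x = train_data.data[:200].view(-1, 28 * 28).float() / 255.   # first 200 images
latent_y = train_data.targets[:200].numpy()                          # their labels

with torch.no_grad():                     # inference only, no gradients needed
    codes, _ = autoencoder(latent_x)      # codes has shape (200, 3)
codes = codes.numpy()

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
sc = ax.scatter(codes[:, 0], codes[:, 1], codes[:, 2], c=latent_y, cmap='tab10')
fig.colorbar(sc, label='digit')
plt.show()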

Convolutional autoencoder

import torch
import torch.nn as nn
import torch.utils.data as Data
from torchvision.datasets import MNIST
from torchvision import transforms as tfs
import matplotlib.pyplot as plt
import numpy as np

# Load the data
train_data = MNIST(root='./mnist/', train=True, transform=tfs.ToTensor(), download=True)  # 60,000 training images
print(train_data.data.size())     # (60000, 28, 28)
print(train_data.targets.size())  # (60000)
plt.imshow(train_data.data[0].numpy())  # show the first image (rendered in false color by matplotlib's default colormap)
plt.show()

train_loader = Data.DataLoader(dataset=train_data, batch_size=64, shuffle=True)  # batch the data and shuffle it

# Define the convolutional autoencoder
class conv_autoencoder(nn.Module):
    def __init__(self):
        super(conv_autoencoder, self).__init__()

        self.encoder = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=3, padding=1),  # (b, 16, 10, 10)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),  # (b, 16, 5, 5)
            nn.Conv2d(16, 8, kernel_size=3, stride=2, padding=1),  # (b, 8, 3, 3)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1)  # (b, 8, 2, 2)
        )

        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(8, 16, kernel_size=3, stride=2),  # (b, 16, 5, 5)
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 8, kernel_size=5, stride=3, padding=1),  # (b, 8, 15, 15)
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, kernel_size=2, stride=2, padding=1),  # (b, 1, 28, 28)
            nn.Tanh()
        )

    def forward(self, x):
        encode = self.encoder(x)
        decode = self.decoder(encode)
        return encode, decode

autoencoder = conv_autoencoder()
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.005)  # optimizer
loss_func = nn.MSELoss()                               # loss function: mean squared error

# Start training
for epoch in range(1):
    for step, (x, b_label) in enumerate(train_loader):
        #print(x.shape)           # (64, 1, 28, 28)
        b_x = x
        b_y = x

        encoded, decoded = autoencoder(b_x)
        loss = loss_func(decoded, b_y)      # compute the loss
        optimizer.zero_grad()               # clear the gradients
        loss.backward()                     # backpropagation
        optimizer.step()                    # update the parameters

        if step % 100 == 0:        # report every 100 steps
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())

# Create a figure
f, a = plt.subplots(2, 10, figsize=(10, 2))  # a 2-row x 10-column grid of subplots; figsize is (width, height)
plt.ion()
# In interactive mode, plt.plot(x) / plt.imshow(x) draw immediately without calling plt.show().
# If ion() is turned on in a script and never turned off with ioff(), the final figure flashes and
# disappears instead of staying on screen; to prevent that, call plt.ioff() before the last plt.show().

# Images used to preview the original data
view_data = train_data.data[:10].view(-1, 1, 28, 28).float() / 255.
#print(view_data.shape)   # (10, 1, 28, 28)
for i in range(10):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)))
    a[0][i].set_xticks(())
    a[0][i].set_yticks(())

encoded_data, decoded_data = autoencoder(view_data)
for i in range(10):
    a[1][i].clear()
    a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)))
    a[1][i].set_xticks(())
    a[1][i].set_yticks(())
plt.draw()
plt.pause(0.05)  # pause for 0.05 s
plt.ioff()
plt.show()
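
The per-layer shape annotations in conv_autoencoder can be double-checked with a dummy forward pass. The short check below is an added illustration (not part of the original script) and only assumes the class defined above.

# Sanity check (illustrative, not in the original post): push a dummy batch
# through the encoder and decoder to confirm the shape comments above.
check_model = conv_autoencoder()
dummy = torch.zeros(1, 1, 28, 28)          # one fake 28x28 grayscale image
code = check_model.encoder(dummy)
recon = check_model.decoder(code)
print(code.shape)    # torch.Size([1, 8, 2, 2])
print(recon.shape)   # torch.Size([1, 1, 28, 28])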
