PyTorch Autoencoder in Practice (CUDA)

import torch
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

transform = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Lambda(lambda x: x.repeat(3, 1, 1)),  # would convert to three channels; not needed here
    transforms.Normalize(mean=[0.5], std=[0.5])])  # single value only, not [0.5, 0.5, 0.5]: MNIST images have one channel

dataset_train = datasets.MNIST(root="./data", transform=transform, train=True, download=True)
dataset_test = datasets.MNIST(root="./data", transform=transform, train=False)

train_load = torch.utils.data.DataLoader(dataset=dataset_train, batch_size=4, shuffle=True)
test_load = torch.utils.data.DataLoader(dataset=dataset_test, batch_size=4, shuffle=True)
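As a quick sanity check (a minimal sketch using the dataset_train object defined above, not part of the original post), a single transformed sample should come back as a 1x28x28 tensor with values pushed into roughly [-1, 1] by the Normalize step:

sample, target = dataset_train[0]
print(sample.shape)                 # torch.Size([1, 28, 28]) -- single channel
print(sample.min(), sample.max())   # about -1.0 and 1.0 after Normalize(mean=[0.5], std=[0.5])
print(target)                       # the digit label as an int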

images, label = next(iter(train_load))
print(images.shape)

images_example = torchvision.utils.make_grid(images)
images_example = images_example.numpy().transpose(1, 2, 0)
mean = 0.5
std = 0.5
images_example = images_example * std + mean  # undo the normalization for display
print([label[i] for i in range(4)])
plt.imshow(images_example)
plt.show()

noisy_images = images_example + 0.5 * np.random.randn(*images_example.shape)  # the * unpacks the shape tuple into separate dimension arguments for randn
noisy_images = np.clip(noisy_images, 0., 1.)
plt.imshow(noisy_images)
plt.show()
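The * matters because np.random.randn takes each dimension as a separate positional argument rather than a shape tuple; a minimal illustration:

shape = (2, 3)
print(np.random.randn(*shape).shape)   # (2, 3) -- dimensions passed individually
print(np.random.random(shape).shape)   # (2, 3) -- this function accepts a size tuple directly
# np.random.randn(shape) would raise a TypeError: a tuple is not an integer dimension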

# Build the encoder-decoder network
class AutoEncoder(torch.nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = torch.nn.Sequential(
            torch.nn.Linear(28*28, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 32),
            torch.nn.ReLU())
        self.decoder = torch.nn.Sequential(
            torch.nn.Linear(32, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 28*28))

    def forward(self, input):
        output = self.encoder(input)
        output = self.decoder(output)
        return output

model = AutoEncoder()
print(model)
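Before training, it is worth confirming that a flattened batch passes through the network and comes back with the same shape (a small sketch with a random batch; the model is still on the CPU at this point):

with torch.no_grad():
    dummy = torch.randn(4, 28*28)   # a fake batch of 4 flattened 28x28 images
    print(model(dummy).shape)       # torch.Size([4, 784])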

Use_gpu = torch.cuda.is_available()
if Use_gpu:
    model = model.cuda()
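An equivalent, more flexible pattern (a sketch, not from the original post, and an alternative to the two lines above rather than an addition) is to pick a device object once and move both the model and each batch to it:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# later, inside the training loop: X_train = X_train.to(device)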

# Set up the optimizer and loss function
optimizer = torch.optim.Adam(model.parameters())
loss_f = torch.nn.MSELoss()
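Adam runs here with its default learning rate of 1e-3; if you want to tune it, pass it explicitly (an optional variant, not in the original code):

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)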

# Training
epoch_n = 5
for epoch in range(epoch_n):
    running_loss = 0.0
    print("Epoch {}/{}".format(epoch+1, epoch_n))
    print("-"*10)
    for data in train_load:
        X_train, _ = data
        noisy_X_train = X_train + 0.5*torch.randn(X_train.shape)  # add Gaussian noise to the input
        noisy_X_train = torch.clamp(noisy_X_train, 0., 1.)
        X_train, noisy_X_train = Variable(X_train.view(-1, 28*28)), Variable(noisy_X_train.view(-1, 28*28))  # flatten the images into vectors
        X_train, noisy_X_train = X_train.cuda(), noisy_X_train.cuda()  # assumes a GPU, as in the title
        train_pre = model(noisy_X_train)
        loss = loss_f(train_pre, X_train)  # reconstruct the clean image from the noisy input
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print("Loss is:{:.4f}".format(running_loss/len(dataset_train)))

# Prepare test data
data_loader_test = torch.utils.data.DataLoader(dataset=dataset_test, batch_size=4, shuffle=True)
X_test, _ = next(iter(data_loader_test))

img1 = torchvision.utils.make_grid(X_test)
img1 = img1.numpy().transpose(1, 2, 0)
mean = [0.5, 0.5, 0.5]  # make_grid returns a 3-channel image, so three values are needed here
std = [0.5, 0.5, 0.5]
img1 = img1*std + mean
noisy_X_test = img1 + 0.5*np.random.randn(*img1.shape)
noisy_X_test = np.clip(noisy_X_test, 0., 1.)
plt.figure()
plt.imshow(noisy_X_test)

# Evaluate the trained model on noisy test images
img2 = X_test + 0.5*torch.randn(*X_test.shape)
img2 = torch.clamp(img2, 0., 1.)
img2 = Variable(img2.view(-1, 28*28))  # flatten for the fully connected network (CPU so far)
img2 = img2.cuda()  # move to the GPU, where the model lives
test_pred = model(img2)
img_test = test_pred.data.view(-1, 1, 28, 28)
img2 = torchvision.utils.make_grid(img_test)
img2 = img2.cpu()  # the remaining numpy steps run on the CPU
img2 = img2.numpy().transpose(1, 2, 0)
img2 = img2*std + mean
img2 = np.clip(img2, 0., 1.)
plt.figure()
plt.imshow(img2)
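To go beyond a visual check, the same denoising pass can be scored over the whole test set with the average reconstruction MSE (a sketch, assuming the model is on the GPU as above):

model.eval()
total_loss = 0.0
with torch.no_grad():
    for X_batch, _ in test_load:
        noisy = torch.clamp(X_batch + 0.5*torch.randn(X_batch.shape), 0., 1.)
        X_batch, noisy = X_batch.view(-1, 28*28).cuda(), noisy.view(-1, 28*28).cuda()
        total_loss += loss_f(model(noisy), X_batch).item()
print("Average test MSE per batch: {:.4f}".format(total_loss/len(test_load)))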
