Unsupervised Learning
Autoencoder (AE)
import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from ae import AE
from torch import nn, optim
import visdom


def main():
    mnist_train = datasets.MNIST('mnist', train=True, transform=transforms.Compose([
        transforms.ToTensor()
    ]), download=True)
    mnist_train = DataLoader(mnist_train, batch_size=32, shuffle=True)

    mnist_test = datasets.MNIST('mnist', train=False, transform=transforms.Compose([
        transforms.ToTensor()
    ]), download=True)
    mnist_test = DataLoader(mnist_test, batch_size=32, shuffle=True)

    x, _ = next(iter(mnist_train))
    print('x:', x.shape)

    device = torch.device('cuda')
    model = AE().to(device)
    criteon = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    print(model)

    viz = visdom.Visdom()

    for epoch in range(1000):
        for batchidx, (x, _) in enumerate(mnist_train):
            x = x.to(device)

            x_hat = model(x)
            loss = criteon(x_hat, x)

            # backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, 'loss:', loss.item())

        # visualize reconstructions on a test batch
        x, _ = next(iter(mnist_test))
        x = x.to(device)
        with torch.no_grad():
            x_hat = model(x)
        viz.images(x, nrow=8, win='x', opts=dict(title='x'))
        viz.images(x_hat, nrow=8, win='x_hat', opts=dict(title='x_hat'))


if __name__ == '__main__':
    main()
import torch
from torch import nn


class AE(nn.Module):

    def __init__(self):
        super(AE, self).__init__()

        # encoder: [b, 784] => [b, 20]
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 20),
            nn.ReLU()
        )
        # decoder: [b, 20] => [b, 784]
        self.decoder = nn.Sequential(
            nn.Linear(20, 64),
            nn.ReLU(),
            nn.Linear(64, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Sigmoid()
        )

    def forward(self, x):
        """
        :param x: [b, 1, 28, 28]
        :return: reconstruction of x, [b, 1, 28, 28]
        """
        batchsz = x.size(0)
        # flatten the image
        x = x.view(batchsz, 784)
        # encode to the 20-dim latent, then decode back
        x = self.encoder(x)
        x = self.decoder(x)
        # reshape to image
        x = x.view(batchsz, 1, 28, 28)
        return x
Variational Autoencoder (VAE)
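Unlike the plain autoencoder, the VAE encoder outputs the mean mu and standard deviation sigma of a Gaussian posterior, and a latent sample is drawn with the reparameterization trick h = mu + sigma * eps, eps ~ N(0, I). The extra loss term computed in the code below is the KL divergence between N(mu, sigma^2) and the standard normal prior,

KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1),

averaged over all elements in the batch (the 1e-8 inside the log is only for numerical stability).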
import torch
from torch import nn


class VAE(nn.Module):

    def __init__(self):
        super(VAE, self).__init__()

        # encoder: [b, 784] => [b, 20]
        # the 20 outputs are split into mu (10 dims) and sigma (10 dims)
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 20),
            nn.ReLU()
        )
        # decoder: [b, 10] => [b, 784]
        self.decoder = nn.Sequential(
            nn.Linear(10, 64),
            nn.ReLU(),
            nn.Linear(64, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Sigmoid()
        )

    def forward(self, x):
        """
        :param x: [b, 1, 28, 28]
        :return: reconstruction x_hat and the KL divergence kld
        """
        batchsz = x.size(0)
        # flatten the image
        x = x.view(batchsz, 784)
        # encoder output [b, 20]: first half is mu, second half is sigma
        h_ = self.encoder(x)
        mu, sigma = h_.chunk(2, dim=1)
        # reparameterization trick: sample h ~ N(mu, sigma^2)
        h = mu + sigma * torch.randn_like(sigma)

        # KL divergence between N(mu, sigma^2) and N(0, 1),
        # averaged over all pixels in the batch
        kld = 0.5 * torch.sum(
            torch.pow(mu, 2) +
            torch.pow(sigma, 2) -
            torch.log(1e-8 + torch.pow(sigma, 2)) - 1
        ) / (batchsz * 28 * 28)

        # decode the latent sample and reshape back to an image
        x_hat = self.decoder(h)
        x_hat = x_hat.view(batchsz, 1, 28, 28)

        return x_hat, kld
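A minimal training sketch for the VAE, mirroring the AE loop above; the module name vae (i.e. saving the class above as vae.py) and the KL weight of 1.0 are assumptions, and the only real change is that the loss adds the returned KL term to the reconstruction MSE.

import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from vae import VAE  # assumes the VAE class above is saved as vae.py


def main():
    mnist_train = DataLoader(
        datasets.MNIST('mnist', train=True,
                       transform=transforms.ToTensor(), download=True),
        batch_size=32, shuffle=True)

    device = torch.device('cuda')
    model = VAE().to(device)
    criteon = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(1000):
        for x, _ in mnist_train:
            x = x.to(device)

            x_hat, kld = model(x)
            # total loss = reconstruction error + weighted KL term
            loss = criteon(x_hat, x) + 1.0 * kld

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, 'loss:', loss.item(), 'kld:', kld.item())


if __name__ == '__main__':
    main()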