回归问题
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
# Toy regression data: 100 points on y = x^2 with uniform noise in [0, 0.2).
x = torch.linspace(-1, 1, 100).unsqueeze(1)  # shape (100, 1)
y = x ** 2 + 0.2 * torch.rand(x.size())
class Net(nn.Module):
    """Small MLP for 1-D curve fitting: 1 -> 10 (ReLU) -> 1."""

    def __init__(self):
        super(Net, self).__init__()
        # Hidden layer kept inside a Sequential, matching the file's style.
        self.hidden = nn.Sequential(nn.Linear(1, 10), nn.ReLU())
        self.predict = nn.Linear(10, 1)

    def forward(self, x):
        # (N, 1) -> (N, 10) -> (N, 1)
        return self.predict(self.hidden(x))
net = Net()
print(net)

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = nn.MSELoss()

# FIX: interactive mode was never enabled, yet plt.ioff() is called at the
# end — without plt.ion() the animation frames never display live.
plt.ion()
for t in range(100):
    prediction = net(x)
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Redraw the fit every 5 steps.
    if t % 5 == 0:
        plt.cla()
        plt.scatter(x.data, y.data)
        plt.plot(x.data, prediction.data, 'r-', lw=5)
        # loss.item() instead of loss.data: plain float for the format string.
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.ioff()
plt.show()
分类问题
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Two Gaussian clusters of 100 points each:
#   class 0 centered at (2, 2), class 1 centered at (-2, -2), unit std.
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)
y0 = torch.zeros(100)
x1 = torch.normal(-2 * n_data, 1)
y1 = torch.ones(100)
# Stack both clusters into one (200, 2) input set with (200,) labels.
x = torch.cat((x0, x1), dim=0)
y = torch.cat((y0, y1), dim=0)
class Net(nn.Module):
    """2-D point classifier: 2 -> 10 (ReLU) -> 2 raw logits."""

    def __init__(self):
        super(Net, self).__init__()
        self.hidden = nn.Sequential(nn.Linear(2, 10), nn.ReLU())
        self.predict = nn.Linear(10, 2)

    def forward(self, x):
        # (N, 2) features -> (N, 10) hidden -> (N, 2) logits
        hidden_repr = self.hidden(x)
        return self.predict(hidden_repr)
net = Net()
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = nn.MSELoss()

# FIX: the original called plt.show() here, which blocks the script before
# any training happens; interactive mode is what the animation needs.
plt.ion()
for t in range(1, 60):
    # Regress the softmax probability of class 1 toward the 0/1 labels.
    # NOTE(review): MSE on softmax output is unusual for classification —
    # CrossEntropyLoss on raw logits is the standard choice; the original
    # scheme is kept to preserve behavior.
    prediction = F.softmax(net(x), dim=1)[:, 1]
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 2 == 0:
        plt.cla()
        # Hard class predictions for plotting/accuracy.
        pred_y = torch.max(F.softmax(net(x), dim=1), 1)[1].squeeze()
        plt.scatter(x.data[:, 0], x.data[:, 1], c=pred_y, s=100, lw=0)
        accuracy = float((pred_y == y).sum()) / 200.0
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
# FIX: close interactive mode and keep the final figure open.
plt.ioff()
plt.show()
读取和保存网络
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
def save():
    """Fit a small MLP to the module-level (x, y) data, then persist it
    two ways: the whole module to 'net.pkl' and only the parameters to
    'net_params.pkl'.

    NOTE(review): relies on globals x and y defined elsewhere in the file.
    """
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    torch.save(net1, 'net.pkl')                      # entire module (pickle)
    torch.save(net1.state_dict(), 'net_params.pkl')  # parameters only
def restore_net():
    """Reload the full module pickled by save() and return it.

    Fixes: the original loaded the network into a local and dropped it,
    so the function did nothing useful — it now returns the module.
    weights_only=False is required on torch >= 2.6 to unpickle a whole
    nn.Module (the file is trusted: save() wrote it ourselves).
    """
    net2 = torch.load('net.pkl', weights_only=False)
    return net2
def restore_params():
    """Rebuild the architecture and load the weights saved by save().

    BUG FIX: the original called net3.load_state_dict() with no argument,
    which raises TypeError — the state dict saved to 'net_params.pkl'
    must be loaded and passed in. Also returns the restored network so
    the function is usable.
    """
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    net3.load_state_dict(torch.load('net_params.pkl'))
    return net3
分批运行
import torch
import torch.utils.data as Data
# Minibatch demo: 10 paired points served in shuffled batches of 5.
BATCH_SIZE = 5

x = torch.linspace(1, 10, 10)   # inputs 1..10
y = torch.linspace(10, 1, 10)   # targets 10..1 (so x + y == 11 pairwise)

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,     # reshuffled every epoch
    num_workers=0,    # load in the main process
)

for epoch in range(3):
    for step, (batch_x, batch_y) in enumerate(loader):
        print('Epoch', epoch, '|Step:', step, '| batch x:', batch_x, '|batch y:', batch_y)
CNN手写数字识别
import torch
import torch.utils.data as Data
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
# --- Hyperparameters and MNIST data for the CNN demo ---
LR = 0.001  # Adam learning rate
BATCH_SIZE = 50  # training minibatch size
EPOCH =1  # single pass over the training set
DOWNLOAD_MNIST=False  # set True on the first run to fetch the dataset
# Training set: ToTensor() converts images to (1, 28, 28) floats in [0, 1].
train_data = torchvision.datasets.MNIST(
root='./mnist',
train=True,
transform = torchvision.transforms.ToTensor(),
download = DOWNLOAD_MNIST
)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
# Test set loaded without a transform; scaled manually below instead.
test_data = torchvision.datasets.MNIST(root="./mnist/", train=False)
# First 1000 test images as (N, 1, 28, 28) floats, normalized to [0, 1].
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:1000]/255
test_y = test_data.targets[:1000]  # matching integer labels
class CNN(nn.Module):
    """Two conv blocks (conv -> ReLU -> 2x2 max-pool) plus a linear head.

    Input  : (N, 1, 28, 28) images.
    Output : (N, 10) class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # 1x28x28 -> 16x28x28 (padding=2 keeps the size) -> 16x14x14 pooled.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # 16x14x14 -> 32x14x14 -> 32x7x7 pooled.
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # (N, 32*7*7)
        return self.out(flat)
cnn = CNN()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()  # expects raw logits + integer labels

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        output = cnn(x)
        loss = loss_func(output, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            # FIX: evaluate under no_grad — the original tracked gradients
            # through the full 1000-image test forward pass for no reason.
            with torch.no_grad():
                test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].numpy().squeeze()
            accuracy = float((pred_y == test_y.numpy()).sum()) / test_y.size(0)
            # loss.item() instead of loss.data.numpy(): plain float, non-deprecated.
            print('Epoch:', epoch, '| train loss %.4f' % loss.item(), '| test accuracy:%.4f ' % accuracy)

# Show the prediction for the first test image.
with torch.no_grad():
    test_output = cnn(test_x[:1])
pred_y = torch.max(test_output, 1)[1].numpy().squeeze()
print(pred_y, 'pred_y')
print(test_y[:1])
plt.imshow(test_x[0].numpy().squeeze(), cmap='gray')
plt.show()
LSTM手写数字识别
import torch
import torch.utils.data as Data
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
# --- Hyperparameters and MNIST data for the LSTM demo ---
LR = 0.001  # Adam learning rate
BATCH_SIZE = 50  # training minibatch size
INPUT_SIZE = 28  # features per time step: one 28-pixel image row
TIMR_STEP = 28  # time steps per sequence; NOTE(review): likely a typo for TIME_STEP
EPOCH =1  # single pass over the training set
DOWNLOAD_MNIST=False  # set True on the first run to fetch the dataset
# Training set: ToTensor() converts images to (1, 28, 28) floats in [0, 1].
train_data = torchvision.datasets.MNIST(
root='./mnist',
train=True,
transform = torchvision.transforms.ToTensor(),
download = DOWNLOAD_MNIST
)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
# Test set loaded without a transform; scaled manually below instead.
test_data = torchvision.datasets.MNIST(root="./mnist/", train=False)
# First 1000 test images as (N, 1, 28, 28) floats, normalized to [0, 1].
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:1000]/255
test_y = test_data.targets[:1000]  # matching integer labels
class RNN(nn.Module):
    """Single-layer LSTM that reads an image row-by-row, then classifies
    from the last time step's output.

    Input  : (N, 28, 28) — each 28-pixel row is one time step.
    Output : (N, 10) class logits.
    """

    def __init__(self):
        super(RNN, self).__init__()
        # INPUT_SIZE is the module-level constant (28) defined above.
        self.rnn = nn.LSTM(
            input_size=INPUT_SIZE,
            hidden_size=64,
            num_layers=1,
            batch_first=True,  # tensors are (batch, time, feature)
        )
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # r_out: (N, T, 64); (h_n, h_c) are the final hidden/cell states.
        r_out, (h_n, h_c) = self.rnn(x, None)  # None = zero initial state
        last_step = r_out[:, -1, :]
        return self.out(last_step)
rnn = RNN()
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()  # expects raw logits + integer labels

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        # (N, 1, 28, 28) image -> (N, 28, 28): 28 time steps of 28 features.
        x = x.view(-1, 28, 28)
        output = rnn(x)
        loss = loss_func(output, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            # FIX: evaluate under no_grad — the original tracked gradients
            # through the full test forward pass for no reason.
            with torch.no_grad():
                test_output = rnn(test_x.view(-1, 28, 28))
            pred_y = torch.max(test_output, 1)[1].numpy().squeeze()
            accuracy = float((pred_y == test_y.numpy()).sum()) / test_y.size(0)
            # loss.item() instead of loss.data.numpy(): plain float, non-deprecated.
            print('Epoch:', epoch, '| train loss %.4f' % loss.item(), '| test accuracy:%.4f ' % accuracy)

# Show the prediction for the first test image.
with torch.no_grad():
    test_output = rnn(test_x[:1].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].numpy().squeeze()
print(pred_y, 'pred_y')
print(test_y[:1])
plt.imshow(test_x[0].numpy().squeeze(), cmap='gray')
plt.show()
RNN拟合sin到cos函数
import torch
import torch.utils.data as Data
import torch.nn as nn
import torchvision
import numpy as np
import matplotlib.pyplot as plt
# Fit cos(t) from sin(t) with a vanilla RNN, carrying hidden state between
# segments.
LR = 0.02
INPUT_SIZE = 1   # one scalar (the sin value) per time step
TIMR_STEP = 10   # points per segment; NOTE(review): likely a typo for TIME_STEP
EPOCH = 1        # unused below; kept for consistency with the other demos


class RNN(nn.Module):
    """Single-layer RNN mapping a (N, T, 1) sin sequence to (N, T, 1) cos."""

    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,
            num_layers=1,
            batch_first=True,
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # r_out: (N, T, 32); the hidden state is threaded through by the caller.
        r_out, h_state = self.rnn(x, h_state)
        return self.out(r_out), h_state


rnn = RNN()
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()

h_state = None  # zero initial hidden state
for step in range(1):
    start, end = step * np.pi, (step + 1) * np.pi
    steps = np.linspace(start, end, TIMR_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])  # (1, T, 1)
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    # BUG FIX: forward() returns (prediction, h_state). The original bound
    # the whole tuple to `prediction`, then crashed on `h_state.data`
    # (h_state was still None) and would have fed a tuple to MSELoss.
    prediction, h_state = rnn(x, h_state)
    h_state = h_state.data  # detach so gradients don't flow across segments

    loss = loss_func(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
自编码图像压缩
import torch
import torch.utils.data as Data
import torch.nn as nn
import torchvision
import numpy as np
import matplotlib.pyplot as plt
# --- Hyperparameters and MNIST data for the autoencoder demo ---
LR = 0.001  # Adam learning rate
BATCH_SIZE = 50  # training minibatch size
EPOCH =10  # passes over the training set
DOWNLOAD_MNIST=False  # set True on the first run to fetch the dataset
# Training set: ToTensor() converts images to (1, 28, 28) floats in [0, 1].
train_data = torchvision.datasets.MNIST(
root='./mnist',
train=True,
transform=torchvision.transforms.ToTensor(),
download=DOWNLOAD_MNIST
)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
# Test set loaded without a transform; scaled manually below instead.
test_data = torchvision.datasets.MNIST(root="./mnist/", train=False)
# First 1000 test images as (N, 1, 28, 28) floats, normalized to [0, 1].
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:1000]/255
test_y = test_data.targets[:1000]  # labels (unused by the autoencoder itself)
class AutoEncoder(nn.Module):
    """Symmetric MLP autoencoder: 784 -> 128 -> 64 -> 12 -> 3 and back.

    forward() returns (encoded, decoded): the 3-D bottleneck code and the
    reconstruction, squashed to [0, 1] by the final Sigmoid to match
    ToTensor-scaled pixel values.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 128), nn.Tanh(),
            nn.Linear(128, 64), nn.Tanh(),
            nn.Linear(64, 12), nn.Tanh(),
            nn.Linear(12, 3),            # 3-D bottleneck (plottable)
        )
        self.decoder = nn.Sequential(
            nn.Linear(3, 12), nn.Tanh(),
            nn.Linear(12, 64), nn.Tanh(),
            nn.Linear(64, 128), nn.Tanh(),
            nn.Linear(128, 28 * 28),
            nn.Sigmoid(),                # reconstruction in [0, 1]
        )

    def forward(self, x):
        code = self.encoder(x)
        return code, self.decoder(code)
autoencoder = AutoEncoder()
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        # Flatten each image; the input itself is the reconstruction target.
        b_x = x.view(-1, 28 * 28)
        b_y = x.view(-1, 28 * 28)

        encoded, decoded = autoencoder(b_x)
        loss = loss_func(decoded, b_y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()