# 一般搭建 — "general build": define the network explicitly as an nn.Module subclass
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
# Toy regression data: 4 samples x 10 features, with 2-dim targets per sample.
# torch.autograd.Variable has been a deprecated no-op wrapper since PyTorch 0.4;
# plain tensors participate in autograd directly.
x = torch.randn(4, 10)
y = torch.randn(4, 2)
class Net(torch.nn.Module):
    """Fully connected network with two ReLU hidden layers.

    Args:
        n_feature: input feature dimension.
        n_hidden1: width of the first hidden layer.
        n_hidden2: width of the second hidden layer.
        n_output: output dimension (left linear — suitable for regression).
    """

    def __init__(self, n_feature, n_hidden1, n_hidden2, n_output):
        super(Net, self).__init__()
        self.Linear1 = torch.nn.Linear(n_feature, n_hidden1)
        self.Linear2 = torch.nn.Linear(n_hidden1, n_hidden2)
        self.out = torch.nn.Linear(n_hidden2, n_output)

    def forward(self, x):
        # ReLU after each hidden layer; no activation on the output layer
        # so the network can produce unbounded regression values.
        x = torch.nn.functional.relu(self.Linear1(x))
        x = torch.nn.functional.relu(self.Linear2(x))
        return self.out(x)
# n_output must be 2 to match the targets y of shape (4, 2); the original
# value 1 produced a (4, 1) output that silently broadcast against y inside
# MSELoss, computing a different loss than intended.
net = Net(10, 100, 100, 2)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
loss_func = torch.nn.MSELoss()

for i in range(100):
    out = net(x)
    loss = loss_func(out, y)
    optimizer.zero_grad()  # gradients accumulate by default; clear each step
    loss.backward()
    optimizer.step()
    # .item() is the supported way to read a scalar loss (.data is deprecated)
    plt.scatter(i, loss.item(), marker='.')
    print(loss.item())
plt.show()
# 快速搭建 — "quick build": same network assembled with nn.Sequential
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
# Toy regression data: 4 samples x 10 features, with 2-dim targets per sample.
# The deprecated Variable wrapper (no-op since PyTorch 0.4) is dropped.
x = torch.randn(4, 10)
y = torch.randn(4, 2)
# Same architecture as the explicit Net class, built with nn.Sequential.
# The final layer must emit 2 values per sample to match the targets y of
# shape (4, 2); the original out_features=1 silently broadcast inside MSELoss.
net = torch.nn.Sequential(
    torch.nn.Linear(10, 100),
    torch.nn.ReLU(),
    torch.nn.Linear(100, 100),
    torch.nn.ReLU(),
    torch.nn.Linear(100, 2),
)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
loss_func = torch.nn.MSELoss()

for i in range(100):
    out = net(x)
    loss = loss_func(out, y)
    optimizer.zero_grad()  # gradients accumulate by default; clear each step
    loss.backward()
    optimizer.step()
    # .item() is the supported way to read a scalar loss (.data is deprecated)
    plt.scatter(i, loss.item(), marker='.')
    print(loss.item())
plt.show()