import torch
import matplotlib.pyplot as plt
from torch import nn, optim
from time import perf_counter

# Cell 1: build a noisy cubic dataset — 10k points on [-3, 3] shaped as a
# column vector (N, 1) — and preview it as a scatter plot.
x = torch.linspace(-3, 3, 10000).unsqueeze(dim=1)
y = x.pow(3) + torch.rand(x.size()) * 0.3
plt.scatter(x.numpy(), y.numpy(), s=0.01)
plt.show()
# Cell 2: re-import and regenerate the same noisy cubic dataset so this cell
# can run independently of the plotting cell above (duplicates kept on purpose).
import torch
import matplotlib.pyplot as plt
from torch import nn, optim
from time import perf_counter

x = torch.linspace(-3, 3, 10000).unsqueeze(dim=1)
y = x.pow(3) + torch.rand(x.size()) * 0.3
class Net(nn.Module):
    """One-hidden-layer MLP: Linear -> ReLU -> Linear.

    Args:
        input_feature: number of input features per sample.
        num_hidden: width of the hidden layer.
        outputs: number of output features per sample.
    """

    def __init__(self, input_feature: int, num_hidden: int, outputs: int) -> None:
        super().__init__()
        self.hidden = nn.Linear(input_feature, num_hidden)
        self.out = nn.Linear(num_hidden, outputs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the hidden layer with ReLU, then the linear output layer."""
        x = torch.nn.functional.relu(self.hidden(x))
        x = self.out(x)
        return x
# Place the model and the full dataset on the GPU when one is available;
# `CUDA` is also consulted later when tensors must return to CPU for plotting.
CUDA = torch.cuda.is_available()
if CUDA:
    net = Net(input_feature=1, num_hidden=20, outputs=1).cuda()
    inputs = x.cuda()
    target = y.cuda()
else:
    net = Net(input_feature=1, num_hidden=20, outputs=1)
    inputs = x
    target = y
optimizer = optim.SGD(net.parameters(), lr=0.01)
criterion = nn.MSELoss()

# List which parameters the optimizer will update.
for name, param in net.named_parameters():
    if param.requires_grad:
        print(name)
def draw(output, loss):
    """Redraw the data scatter plus the model's current fit curve.

    Args:
        output: model predictions for the module-level ``x`` (may live on GPU).
        loss: scalar loss tensor; its value is rendered on the figure.
    """
    plt.cla()  # clear the axes so successive frames don't pile up
    if CUDA:
        output = output.cpu()  # tensors must be on CPU before plotting
    plt.scatter(x.numpy(), y.numpy(), s=0.001)
    plt.plot(x.numpy(), output.data.numpy(), 'r-', lw=5)
    plt.text(0.5, 0, 'loss=%s' % (loss.item()),
             fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.005)  # brief pause lets the GUI event loop refresh the frame
def train(model, criterion, optimizer, epochs):
    """Run a full-batch training loop, redrawing the fit every 100 epochs.

    Args:
        model: network to optimize; fed the module-level ``inputs``.
        criterion: loss comparing predictions against the module-level ``target``.
        optimizer: optimizer bound to ``model``'s parameters.
        epochs: number of update steps to perform.

    Returns:
        Tuple of ``(model, loss)`` where ``loss`` is from the final epoch.

    NOTE(review): with ``epochs == 0`` the loop never binds ``loss`` and the
    return raises ``NameError`` — callers always pass a positive count here.
    """
    for epoch in range(epochs):
        outputs = model(inputs)
        loss = criterion(outputs, target)
        optimizer.zero_grad()  # gradients accumulate by default; reset each step
        loss.backward()
        optimizer.step()
        if epoch % 100 == 0:
            draw(outputs, loss)  # periodic visual progress check
    return model, loss
# Time the full training run. The local is named `elapsed` (not `time`) so it
# cannot shadow the stdlib `time` module that perf_counter comes from.
start = perf_counter()
model, loss = train(net, criterion, optimizer, 5000)
finish = perf_counter()
elapsed = finish - start
print("计算时间:%s" % elapsed)
print("final loss:", loss.item())
print("weights:", list(model.parameters()))
# Sample run output (pasted console log, kept for reference — commented out so
# the file stays valid Python):
#
# hidden.weight
# hidden.bias
# out.weight
# out.bias
# …
# 计算时间:16.568853700000545
# final loss: 0.020885242149233818
# weights: [Parameter containing:
# tensor([[ 0.9034],
#         [ 0.1697],
#         [-1.6006],
#         [-1.3026],
#         [-1.5112],
#         [-0.1496],
#         [-1.3453],
#         [-1.1424],
#         [ 0.7827],
#         [-0.8794],
#         [-0.3463],
#         [-1.7504],
#         [-0.3597],
#         [ 1.7153],
#         [-0.5268],
#         [-0.1672],
#         [ 1.3576],
#         [ 1.5040],
#         [ 1.0761],
#         [ 1.3660]], device='cuda:0', requires_grad=True), Parameter containing:
# tensor([-0.9302, 0.5086, -3.1035, -1.1606, -3.5677, -0.8621, -0.7312, 0.3425,
#         -1.6159, -2.2425, 1.0380, -2.4755, 1.0784, -4.1484, 1.1116, 0.5482,
#         -1.7674, -0.9522, -1.7016, -2.5550], device='cuda:0',
#        requires_grad=True), Parameter containing:
# tensor([[ 1.2527, 0.4581, -3.3161, -1.4887, -3.6762, 0.0402, -1.0628, -0.1567,
#          1.6498, -2.3455, -0.4392, -2.8871, -0.6040, 4.2925, 0.7922, -0.0791,
#          2.1095, 1.6585, 1.9316, 2.8379]], device='cuda:0',
#        requires_grad=True), Parameter containing:
# tensor([0.2591], device='cuda:0', requires_grad=True)]