代码1:主要进行前向传播和梯度计算(调用 loss.backward() 得到梯度),但没有调用 optimizer.step() 进行参数更新
# Build a toy linear-regression dataset: y = 2x + 1 for x = 0..10.
import numpy as np

x_values = list(range(11))
# Reshape to (11, 1): one sample per row, one feature column, as nn.Linear expects.
x_train = np.array(x_values, dtype=np.float32).reshape(-1, 1)
print(x_train.shape)

y_values = [2 * i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32).reshape(-1, 1)
print(y_train.shape)

import torch
import torch.nn as nn
class LinearRegressionModel(nn.Module):
    """A single linear layer: out = x @ W^T + b."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # One fully-connected layer mapping input_dim features to output_dim outputs.
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # No activation: this is plain linear regression.
        return self.linear(x)
# --- Script 1: forward pass + gradient computation; no parameter update. ---
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

epochs = 1
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()

# Seeding (disabled in this script): uncomment for reproducible runs.
# torch.manual_seed(7)  # cpu
# torch.cuda.manual_seed(7)  # gpu
# np.random.seed(7)  # numpy
# random.seed(7)  # random and transforms
# torch.backends.cudnn.deterministic = True  # cudnn

for epoch in range(epochs):
    epoch += 1
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    # Clear any gradients accumulated by a previous iteration.
    optimizer.zero_grad()

    # Pin the weight to 2.0 so the hand-computed gradient below is easy to verify.
    model.linear.weight.data = torch.tensor([2], dtype=torch.float32).reshape([1, 1])
    # model.linear.bias.data = torch.tensor([1], dtype=torch.float32).reshape([1, 1])

    outputs = model(inputs)
    loss = criterion(outputs, labels)
    print(loss)
    # MSE recomputed by hand; use the sample count instead of hard-coding 11.
    print(torch.sum((outputs - labels) ** 2) / labels.numel())
    print(outputs - labels)

    # backward() fills .grad on the parameters, but optimizer.step() is never
    # called, so the parameter values themselves do not change.
    loss.backward()
    print(model.linear.weight.grad)
    print(model.linear.bias.grad)
    # Analytic gradient of the MSE loss w.r.t. the bias: (2/n) * sum(outputs - labels).
    print(2 * torch.mean(outputs - labels))
    print(model.linear.weight)
    print(model.linear.bias)
    # optimizer.step()

    # With epochs=1 this never fires; kept from the original tutorial template.
    if epoch % 50 == 0:
        print('epoch {}, loss{}'.format(epoch, loss.item()))
MSELoss 就是 $L = \frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)^2$
对bias进行求导就是 $\frac{\partial L}{\partial b} = \frac{2}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)$
同理,对weight求导就是 $\frac{\partial L}{\partial w} = \frac{2}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)\,x_i$
代码2:进行一次反向传播
# Build the same toy dataset for script 2: y = 2x + 1 for x = 0..10.
import numpy as np

x_values = list(range(11))
# Reshape to (11, 1): one sample per row, one feature column, as nn.Linear expects.
x_train = np.array(x_values, dtype=np.float32).reshape(-1, 1)
print(x_train.shape)

y_values = [2 * i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32).reshape(-1, 1)
print(y_train.shape)

import torch
import torch.nn as nn
class LinearRegressionModel(nn.Module):
    """A single linear layer: out = x @ W^T + b."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # One fully-connected layer mapping input_dim features to output_dim outputs.
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # No activation: this is plain linear regression.
        return self.linear(x)
# --- Script 2: one full training step, including the SGD parameter update. ---
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

epochs = 1
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()

# Seed every RNG in play so the run is reproducible.
torch.manual_seed(7)  # cpu
torch.cuda.manual_seed(7)  # gpu
np.random.seed(7)  # numpy
# random.seed(7)  # random and transforms
torch.backends.cudnn.deterministic = True  # cudnn

for epoch in range(epochs):
    epoch += 1
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    # Clear any gradients accumulated by a previous iteration.
    optimizer.zero_grad()

    # Pin the weight to 2.0 so the SGD update below is easy to verify by hand.
    model.linear.weight.data = torch.tensor([2], dtype=torch.float32).reshape([1, 1])
    # model.linear.bias.data = torch.tensor([1], dtype=torch.float32).reshape([1, 1])

    outputs = model(inputs)
    loss = criterion(outputs, labels)
    print(loss)
    # MSE recomputed by hand; use the sample count instead of hard-coding 11.
    print(torch.sum((outputs - labels) ** 2) / labels.numel())
    print(outputs - labels)

    loss.backward()
    print(model.linear.weight)
    print(model.linear.bias)
    print(model.linear.weight.grad)
    print(model.linear.bias.grad)
    # print(2 * torch.mean(outputs - labels))

    # SGD update: param <- param - lr * param.grad
    optimizer.step()
    print('After back propagation..............')
    print(model.linear.weight)
    print(model.linear.bias)
    # .grad is unchanged by step(); it is only cleared by zero_grad().
    print(model.linear.weight.grad)
    print(model.linear.bias.grad)

    # With epochs=1 this never fires; kept from the original tutorial template.
    if epoch % 50 == 0:
        print('epoch {}, loss{}'.format(epoch, loss.item()))