目录
code
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from sklearn import datasets
# 0) prepare data: 100 samples of a noisy 1-D linear relationship
# fix: keyword was misspelled `randome_state` (TypeError at runtime)
x_numpy, y_numpy = datasets.make_regression(
    n_samples=100, n_features=1, noise=20, random_state=1
)
# fix: the array is named `x_numpy`, not `X_numpy` (was a NameError);
# cast to float32 because nn.Linear's weights default to float32
X = torch.from_numpy(x_numpy.astype(np.float32))
y = torch.from_numpy(y_numpy.astype(np.float32))
y = y.view(y.shape[0], 1)  # reshape targets into a column vector (n_samples, 1)
n_samples, n_features = X.shape

# 1) model: a single linear layer, y = w*x + b
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size)

# 2) loss and optimizer
learning_rate = 0.01
criterion = nn.MSELoss()  # fix: mean squared error is nn.MSELoss; nn.MSE does not exist
# fix: optimize the model's parameters — MSELoss has no parameters, so
# SGD(criterion.parameters(), ...) raised "optimizer got an empty parameter list"
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# 3) training loop
num_epochs = 100
for epoch in range(num_epochs):
    # forward pass and loss calculation
    y_pred = model(X)
    loss = criterion(y_pred, y)
    # backward pass: accumulates gradients into each parameter's .grad
    loss.backward()
    # update the weights, then clear the accumulated gradients for the next epoch
    optimizer.step()
    optimizer.zero_grad()
    if (epoch + 1) % 10 == 0:
        print(f"epoch: {epoch +1}, loss = {loss.item():.4f}")

# 4) plot the fitted line against the raw data
# detach() returns a tensor with requires_grad=False, so it can be
# converted to numpy for matplotlib
predicted = model(X).detach().numpy()
plt.plot(x_numpy, y_numpy, "ro")
plt.plot(x_numpy, predicted, "b")
plt.show()
写在最后
此系列为以下Youtube视频教程的课程笔记