PyTorch Workflow Fundamentals

what_were_covering = {
    1: "data (prepare and load)",
    2: "build model",
    3: "fitting the model to data (training)",
    4: "making predictions and evaluating a model (inference)",
    5: "saving and loading a model",
    6: "putting it all together"
}

import torch
from torch import nn
import matplotlib.pyplot as plt 

torch.__version__
'1.12.0'
weight = 0.7
bias = 0.3

# Create the data (a simple linear relationship)
start = 0
end = 1
step = 0.02
X = torch.arange(start, end, step).unsqueeze(dim=1)
y = weight * X + bias
torch.arange(start, end, step).shape
torch.Size([50])
X.shape
torch.Size([50, 1])
y.shape
torch.Size([50, 1])
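Note the unsqueeze(dim=1) above: nn.Linear expects a trailing feature dimension, so inputs are shaped (num_samples, 1) rather than (num_samples,). A quick sketch of the difference (illustrative names, not cells from the original run):

x_flat = torch.arange(start, end, step)   # shape: (50,) -- no feature dimension
x_col = x_flat.unsqueeze(dim=1)           # shape: (50, 1) -- one feature per sample
nn.Linear(1, 1)(x_col).shape              # torch.Size([50, 1])
# nn.Linear(1, 1)(x_flat) would raise a shape error (last dim 50 != in_features 1)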
# Create train/test split
train_split = int(0.8 * len(X)) # 80% of data used for training set, 20% for testing 
X_train, y_train = X[:train_split], y[:train_split]
X_test, y_test = X[train_split:], y[train_split:]

len(X_train), len(y_train), len(X_test), len(y_test)
(40, 40, 10, 10)
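This split is sequential (the first 80% of X goes to training), which is fine for this ordered toy dataset; for real data one would usually shuffle first. A minimal sketch using torch.randperm (hypothetical variables, not used below):

perm = torch.randperm(len(X))                      # random ordering of sample indices
train_idx, test_idx = perm[:train_split], perm[train_split:]
X_train_s, y_train_s = X[train_idx], y[train_idx]  # shuffled 80% training split
X_test_s, y_test_s = X[test_idx], y[test_idx]      # shuffled 20% testing split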
def plot_predictions(
        train_data=X_train,
        train_labels=y_train,
        test_data=X_test,
        test_labels=y_test,
        predictions=None):
    plt.figure(figsize=(10, 7))

    plt.scatter(train_data, train_labels, c="b", s=4, label="Training data")

    plt.scatter(test_data, test_labels, c="g", s=4, label="Testing data")

    if predictions is not None:
        plt.scatter(test_data, predictions, c="r", s=4, label="Predictions")

    plt.legend(prop={"size": 14})

plot_predictions()
# Create a linear regression model class
# Almost everything in PyTorch is an nn.Module (think of it as a neural network Lego block)
class LinearRegressionModel(nn.Module):
    def __init__(self):
        super().__init__()
        # One linear layer: a single weight and a single bias
        self.x1 = nn.Linear(in_features=1, out_features=1)

    def forward(self, x):
        # Forward pass: compute x1.weight * x + x1.bias
        return self.x1(x)

# Set manual seed since nn.Parameter(s) are randomly initialized
torch.manual_seed(42)

# Create an instance of the model (this is a subclass of nn.Module that contains nn.Parameter(s))
model_0 = LinearRegressionModel()

# Check the nn.Parameter(s) within the nn.Module subclass we created
list(model_0.parameters())
[Parameter containing:
 tensor([[0.7645]], requires_grad=True),
 Parameter containing:
 tensor([0.8300], requires_grad=True)]
# List named parameters 
model_0.state_dict()
OrderedDict([('x1.weight', tensor([[0.7645]])), ('x1.bias', tensor([0.8300]))])
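nn.Linear wraps the weight and bias as nn.Parameter tensors for us. For comparison, a minimal sketch of the same model built from raw nn.Parameter objects (an alternative formulation, not the class used in this notebook):

class LinearRegressionModelRaw(nn.Module):
    def __init__(self):
        super().__init__()
        # Randomly initialized, learnable weight and bias (what nn.Linear manages for us)
        self.weights = nn.Parameter(torch.randn(1))
        self.bias = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return self.weights * x + self.bias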
# Make predictions with the (still untrained) model
with torch.inference_mode():
    y_preds = model_0(X_test)
# Check the predictions
print(f"Number of testing samples: {len(X_test)}") 
print(f"Number of predictions made: {len(y_preds)}")
print(f"Predicted values:\n{y_preds}")
Number of testing samples: 10
Number of predictions made: 10
Predicted values:
tensor([[1.4416],
        [1.4569],
        [1.4722],
        [1.4875],
        [1.5028],
        [1.5181],
        [1.5334],
        [1.5487],
        [1.5640],
        [1.5793]])
plot_predictions(predictions=y_preds)

[Figure: predictions of the untrained model plotted against the training and testing data]

y_test - y_preds
tensor([[-0.5816],
        [-0.5829],
        [-0.5842],
        [-0.5855],
        [-0.5868],
        [-0.5881],
        [-0.5894],
        [-0.5907],
        [-0.5920],
        [-0.5933]])
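With random parameters the model is off by roughly 0.59 on every test sample. A one-line summary of that error (an illustrative aside):

(y_test - y_preds).abs().mean()   # mean absolute error of the untrained model, ~0.59 here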
# Mean squared error loss and the Adam optimizer
loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(params=model_0.parameters(),
                             lr=0.001)
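nn.MSELoss averages the squared errors over all samples. A manual equivalent, useful as a sanity check (an illustrative aside, not a cell from the original notebook):

with torch.inference_mode():
    pred = model_0(X_train)
    manual_mse = torch.mean((pred - y_train) ** 2)   # equals loss_fn(pred, y_train)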

torch.manual_seed(42)

epochs = 10000

train_loss_values = []
test_loss_values = []
epoch_count = []

for epoch in range(epochs):
    model_0.train()

    # 1. Forward pass on the training data to get predictions
    y_pred = model_0(X_train)

    # 2. Calculate the loss
    loss = loss_fn(y_pred, y_train)

    # 3. Zero the optimizer's gradients
    optimizer.zero_grad()

    # 4. Backpropagation
    loss.backward()

    # 5. Step the optimizer (update the parameters)
    optimizer.step()

    model_0.eval()

    with torch.inference_mode():
        # 1. Forward pass on the test data
        test_pred = model_0(X_test)

        # 2. Calculate the test loss
        test_loss = loss_fn(test_pred, y_test.type(torch.float))

        if epoch % 10 == 0:
            epoch_count.append(epoch)
            train_loss_values.append(loss.detach().numpy())
            test_loss_values.append(test_loss.detach().numpy())
            print(f"Epoch: {epoch} | MSE Train Loss: {loss} | MSE Test Loss: {test_loss}")

Epoch: 0 | MSE Train Loss: 0.3084445595741272 | MSE Test Loss: 0.34289056062698364
Epoch: 10 | MSE Train Loss: 0.2931530773639679 | MSE Test Loss: 0.3211340308189392
Epoch: 20 | MSE Train Loss: 0.2783251404762268 | MSE Test Loss: 0.
...
Epoch: 1300 | MSE Train Loss: 0.0024774749763309956 | MSE Test Loss: 0.009499110281467438
Epoch: 1310 | MSE Train Loss: 0.002444359939545393 | MSE Test Loss: 0.009375977329909801
Epoch: 1320 | MSE Train Loss: 0.0024114663247019053 | MSE Test Loss: 0.009253264404833317
...
Epoch: 9940 | MSE Train Loss: 1.1102230246251565e-14 | MSE Test Loss: 3.9435123189938276e-14
Epoch: 9950 | MSE Train Loss: 1.1102230246251565e-14 | MSE Test Loss: 3.9435123189938276e-14
Epoch: 9960 | MSE Train Loss: 1.1102230246251565e-14 | MSE Test Loss: 3.9435123189938276e-14
Epoch: 9970 | MSE Train Loss: 1.1102230246251565e-14 | MSE Test Loss: 3.9435123189938276e-14
Epoch: 9980 | MSE Train Loss: 1.1102230246251565e-14 | MSE Test Loss: 3.9435123189938276e-14
Epoch: 9990 | MSE Train Loss: 1.1102230246251565e-14 | MSE Test Loss: 3.9435123189938276e-14
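The train loss bottoms out near machine precision, meaning the model has essentially recovered weight=0.7 and bias=0.3 exactly. The loop body above is the standard five-step pattern; a refactor sketch packaging it as a reusable function (a rewrite for illustration, not code from the notebook):

def training_step(model, loss_fn, optimizer, X, y):
    # One optimization step: forward, loss, zero grads, backward, update
    model.train()
    y_pred = model(X)
    loss = loss_fn(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss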
# Plot the loss curves
plt.plot(epoch_count, train_loss_values, label="Train loss")
plt.plot(epoch_count, test_loss_values, label="Test loss")
plt.title("Training and test loss curves")
plt.ylabel("Loss")
plt.xlabel("Epochs")
plt.legend();

[Figure: training and test loss curves over epochs]

# Find our model's learned parameters
print("The model learned the following values for weights and bias:")
print(model_0.state_dict())
print("\nAnd the original values for weights and bias are:")
print(f"weights: {weight}, bias: {bias}")
The model learned the following values for weights and bias:
OrderedDict([('x1.weight', tensor([[0.7000]])), ('x1.bias', tensor([0.3000]))])

And the original values for weights and bias are:
weights: 0.7, bias: 0.3
# 1. Put the model in evaluation mode
model_0.eval()

# 2. Set up the inference mode context manager
with torch.inference_mode():
    # 3. Make sure the calculations are done with the model and data on the same device;
    # we haven't set up device-agnostic code yet, so both are on the CPU by default.
    # model_0.to(device)
    # X_test = X_test.to(device)
    y_preds = model_0(X_test)
y_preds
tensor([[0.8600],
        [0.8740],
        [0.8880],
        [0.9020],
        [0.9160],
        [0.9300],
        [0.9440],
        [0.9580],
        [0.9720],
        [0.9860]])
plot_predictions(predictions=y_preds)

[Figure: the trained model's predictions closely overlapping the testing data]
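The commented-out .to(device) calls above hint at the usual device-agnostic pattern. A minimal sketch (assumes a CUDA build of PyTorch; everything in this notebook actually ran on CPU):

device = "cuda" if torch.cuda.is_available() else "cpu"   # pick GPU when available

model_0.to(device)              # move model parameters to the chosen device
X_test_dev = X_test.to(device)  # data must live on the same device as the model

with torch.inference_mode():
    y_preds_dev = model_0(X_test_dev)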

from pathlib import Path

# 1. Create models directory 
MODEL_PATH = Path("models")
MODEL_PATH.mkdir(parents=True, exist_ok=True)

# 2. Create model save path 
MODEL_NAME = "01_pytorch_workflow_model_0.pth"
MODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME

# 3. Save the model state dict 
print(f"Saving model to: {MODEL_SAVE_PATH}")
torch.save(obj=model_0.state_dict(), # saving the state_dict() saves only the model's learned parameters
           f=MODEL_SAVE_PATH)
Saving model to: models/01_pytorch_workflow_model_0.pth
# Check the saved file path
!ls -l models/01_pytorch_workflow_model_0.pth
-rw-r--r-- 1 10331309@zte.intra domain users@zte.intra 1063 May  9 20:07 models/01_pytorch_workflow_model_0.pth
# Instantiate a new instance of our model (this will be instantiated with random weights)
loaded_model_0 = LinearRegressionModel()

# Load the state_dict of our saved model (this will update the new instance of our model with trained weights)
loaded_model_0.load_state_dict(torch.load(f=MODEL_SAVE_PATH))
<All keys matched successfully>
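If a checkpoint was saved on a GPU machine and is being loaded on a CPU-only one, torch.load accepts a map_location argument; a hedged variant of the cell above:

# map_location remaps GPU-saved tensors onto the CPU at load time
loaded_model_0.load_state_dict(torch.load(f=MODEL_SAVE_PATH, map_location="cpu"))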
# 1. Put the loaded model into evaluation mode
loaded_model_0.eval()

# 2. Use the inference mode context manager to make predictions
with torch.inference_mode():
    loaded_model_preds = loaded_model_0(X_test) # perform a forward pass on the test data with the loaded model
# Compare previous model predictions with loaded model predictions (these should be the same)
y_preds == loaded_model_preds
tensor([[True],
        [True],
        [True],
        [True],
        [True],
        [True],
        [True],
        [True],
        [True],
        [True]])
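Exact equality works here because both models hold identical parameters and the forward pass is deterministic; in general, floating-point outputs are safer to compare with a tolerance:

torch.isclose(y_preds, loaded_model_preds).all()   # tolerance-based check, tensor(True) here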
