import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
num_inputs = 2       # number of features is 2
num_examples = 1000  # number of training examples is 1000
true_w = [2, -3.4]   # true weights of the linear regression model
true_b = 4.2         # true bias; torch.from_numpy converts a numpy array to a tensor
# np.random.normal arguments:
# loc (float): mean of the distribution (the center of the curve)
# scale (float): standard deviation (the width: larger scale gives a flatter, wider curve; smaller scale a taller, narrower one)
# size (int or tuple of ints): shape of the output; defaults to None, which yields a single value
features = torch.from_numpy(np.random.normal(0, 1, (num_examples, num_inputs))).to(torch.float32)  # generate 1000 examples
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b  # generate 1000 labels
labels += torch.from_numpy(np.random.normal(0, 0.01, size=labels.size())).to(torch.float32)  # add noise; cast to float32 so the in-place add does not mix float64 with float32
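# A quick sanity check (an addition, not in the original post): confirm the
# shapes and dtypes of the synthetic data built above.
assert features.shape == (num_examples, num_inputs)
assert labels.shape == (num_examples,)
assert features.dtype == torch.float32 and labels.dtype == torch.float32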
print(features)
def use_svg_display():
    # display plots as vector graphics (SVG)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    # set the figure size
    plt.rcParams['figure.figsize'] = figsize
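# Note (an addition, not in the original): on newer IPython releases,
# set_matplotlib_formats is deprecated in IPython.display and lives in the
# matplotlib-inline package instead; a hedged alternative, assuming that
# package is installed:
# from matplotlib_inline.backend_inline import set_matplotlib_formats
# set_matplotlib_formats('svg')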
print(features[0], labels[0])
# # After adding the two functions above to ../d2lzh_pytorch, they can be imported like this:
# import sys
# sys.path.append("..")
# from d2lzh_pytorch import *
set_figsize()  # .numpy() below converts tensors to numpy arrays for plotting
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
plt.show()
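# For comparison (an addition, not in the original): the same plot against the
# first feature; with true_w[0] = 2 the point cloud slopes upward, whereas the
# plot above (weight -3.4) slopes downward.
plt.scatter(features[:, 0].numpy(), labels.numpy(), 1)
plt.show()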
# This function is saved in the d2lzh package for later use
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read the examples in random order
    for i in range(0, num_examples, batch_size):  # step through the indices batch_size at a time
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])  # the last batch may be smaller than batch_size
        yield features.index_select(0, j), labels.index_select(0, j)
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X, y)
    break
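# An equivalent way to batch (a sketch, not part of the original post) using
# PyTorch's built-in utilities; DataLoader shuffles and batches like data_iter.
from torch.utils.data import TensorDataset, DataLoader
dataset = TensorDataset(features, labels)
for X, y in DataLoader(dataset, batch_size=batch_size, shuffle=True):
    print(X, y)
    break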
# initialize the weights from N(0, 0.01) and the bias to zero
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
# track gradients so autograd can differentiate the loss w.r.t. w and b
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
def linreg(X, w, b):  # this function is saved in the d2lzh_pytorch package for later use
    return torch.mm(X, w) + b

def squared_loss(y_hat, y):  # this function is saved in the d2lzh_pytorch package for later use
    # note: this returns a vector; also, PyTorch's MSELoss does not divide by 2
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
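# A small check (an addition): with reduction='none', nn.MSELoss returns
# (y_hat - y) ** 2 per element, i.e. exactly twice squared_loss, which is the
# factor-of-2 difference the comment above mentions.
_y_hat = torch.tensor([[1.0], [2.0]])
_y = torch.tensor([0.5, 1.0])
_mse = torch.nn.MSELoss(reduction='none')
assert torch.allclose(_mse(_y_hat, _y.view(_y_hat.size())), 2 * squared_loss(_y_hat, _y))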
def sgd(params, lr, batch_size):  # this function is saved in the d2lzh_pytorch package for later use
    for param in params:
        param.data -= lr * param.grad / batch_size  # update via .data so the step itself is not tracked by autograd
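# An equivalent modern formulation (a sketch, not the author's version): do the
# update inside torch.no_grad() instead of going through .data; the resulting
# parameter step is identical.
def sgd_no_grad(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size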
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):  # train for num_epochs full passes over the data
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # sum the per-example losses into a scalar
        l.backward()  # gradient of the minibatch loss w.r.t. the model parameters
        sgd([w, b], lr, batch_size)  # update the parameters with minibatch SGD
        # don't forget to zero the gradients
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
print(true_w, '\n', w)
print(true_b, '\n', b)
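# A final sanity check (an addition): with noise of std 0.01, the learned
# parameters should land very close to the true ones.
print('max weight error:', (torch.tensor(true_w).view(-1, 1) - w).abs().max().item())
print('bias error:', abs(true_b - b.item()))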