import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn  # nn is PyTorch's high-level API, providing a set of ready-made layers
# assumed: data is a DataFrame with Education and Income columns, e.g. data = pd.read_csv('Income.csv')
X = torch.from_numpy(data.Education.values.reshape(-1, 1).astype(np.float32))  # create the input tensor
Y = torch.from_numpy(data.Income.values.reshape(-1, 1).astype(np.float32))    # create the target tensor, used by the loops below
Plotting: plt.scatter()
plt.xlabel('name of the x-axis data')
reshape(-1, 1): -1 lets the length be inferred automatically; 1 fixes the other dimension to one column
astype(np.float32) unifies the data type
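A small sketch of what these two calls do (hypothetical toy array, not the Income data):
arr = np.array([1, 2, 3])            # shape (3,)
col = arr.reshape(-1, 1)             # shape (3, 1): -1 is inferred as 3
print(col.astype(np.float32).dtype)  # float32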
model = nn.Linear(1, 1)  # out = w @ input + b; calling model(input) computes this
loss_fn = nn.MSELoss()   # mean squared error loss from nn
opt = torch.optim.SGD(model.parameters(), lr=0.0001)  # optimizer: stochastic gradient descent; model.parameters() are the parameters to optimize, lr is the learning rate
for epoch in range(5000):  # training epochs
    for x, y in zip(X, Y):
        y_pred = model(x)          # model output, i.e. the prediction
        loss = loss_fn(y_pred, y)  # loss between the prediction and the actual y
        opt.zero_grad()            # zero the parameter gradients
        loss.backward()            # backpropagation: compute the gradients
        opt.step()                 # update the model parameters
w = model.weight  # learned weight
b = model.bias    # learned bias
plt.scatter(data.Education, data.Income)
plt.plot(X.numpy(), model(X).data.numpy(), c='r')  # scatter of the data plus the fitted line
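As a quick check, the fitted parameters can be printed (a sketch, assuming the training loop above has run):
print(w.item(), b.item())  # learned slope and intercept as Python floats
plt.show()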
Tensors
x = torch.rand(2, 3)   # uniform random values in [0, 1)
print(x)
x = torch.randn(3, 4)  # samples from a standard normal distribution
print(x)
x = torch.zeros(3, 4)  # all zeros
print(x)
x = torch.ones(3, 4, 2)  # all ones; shape (3, 4, 2): 3 blocks of 4 rows and 2 columns
print(x)
print(x.size())
print(x.size(0))  # the size of one dimension can be queried; here the first
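x.shape is an equivalent way to read the dimensions:
print(x.shape)     # torch.Size([3, 4, 2]), same as x.size()
print(x.shape[0])  # 3, same as x.size(0)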
# data types
x = torch.tensor([6, 2], dtype=torch.float32)  # dtype specifies the data type
print(x)
print(x.type())
x = x.type(torch.int64)  # convert the data type
print(x)
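The shortcut conversion methods do the same job; a quick sketch:
print(x.float().dtype)  # torch.float32
print(x.long().dtype)   # torch.int64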
a = np.random.randn(2,3)
print(a)
x1 = torch.from_numpy(a)  # torch.from_numpy() creates a tensor from a numpy array (sharing its memory)
print(x1)
x1 = x1.view(3, 2)  # reshape to 3 rows, 2 columns
print(x1)
x1 = x1.view(-1, 1)  # -1 is inferred automatically; 1 gives one element per row (a single column)
print(x1)
'''
tensor([[ 0.2251, -0.2176, -0.3631],
[ 1.3971, 0.2528, 0.6156]], dtype=torch.float64)
tensor([[ 0.2251, -0.2176],
[-0.3631, 1.3971],
[ 0.2528, 0.6156]], dtype=torch.float64)
tensor([[ 0.2251],
[-0.2176],
[-0.3631],
[ 1.3971],
[ 0.2528],
[ 0.6156]], dtype=torch.float64)
'''
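Note that view() requires contiguous memory; reshape() is the more forgiving alternative, copying only when needed. A sketch:
x2 = x1.reshape(2, 3)  # same elements, new shape; works even where view() would fail
print(x2.shape)        # torch.Size([2, 3])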
print(x1.sum())         # sum of all elements
print(x1.mean())        # mean of all elements
print(x1.sum().item())  # .item() returns the value of a one-element tensor as a Python scalar
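.item() only works on one-element tensors; a quick sketch:
t = torch.tensor([1.0, 2.0, 3.0])
print(t.sum().item())  # 6.0, a plain Python float
# t.item() would raise a RuntimeError: only one-element tensors can be converted to Python scalars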
Tensor autograd (automatic differentiation)
data: the tensor's values
grad: the gradient
grad_fn: the operation that produced the tensor
x = torch.ones(2, 2, requires_grad=True)  # PyTorch will track every operation on this tensor
print(x)
print(x.grad, x.grad_fn)  # None None: x is a user-created leaf and nothing has been computed yet
y = 2 * x + 2
print(y)  # grad_fn=<AddBackward0>: y was produced by an addition
# out = y.mean()
# print(out)
# out.backward()
out = y.sum()
out.backward()  # compute the gradients automatically
print(x.grad)   # inspect the gradient: d out/d x = 2 for every element
print(x.data)   # the raw values, without autograd history
with torch.no_grad():  # disable gradient tracking inside this block
    print((x * 2).requires_grad)  # False
out = x.detach()  # another way to get a tensor without gradient tracking
a = torch.randn(2, 3)   # requires_grad_ is a tensor method, so create a tensor first (the numpy array a above would not work)
a.requires_grad_(True)  # the in-place way to change requires_grad
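A small sketch of what detach() returns: the same data, cut off from the computation graph:
print(out.requires_grad)               # False: out is outside the graph
print(out.data_ptr() == x.data_ptr())  # True: detach() shares the underlying storage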
w = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
learning_rate = 0.0001
for epoch in range(5000):
    for x, y in zip(X, Y):
        y_pred = torch.matmul(x, w) + b
        loss = (y - y_pred).pow(2).mean()
        if w.grad is not None:   # zero the gradients here; they must not accumulate across iterations
            w.grad.data.zero_()  # set w's gradient values to zero
        if b.grad is not None:
            b.grad.data.zero_()
        loss.backward()  # compute the gradients of the loss w.r.t. the variables: d loss/d w (w.grad) and d loss/d b (b.grad)
        with torch.no_grad():  # stochastic gradient descent update, outside of gradient tracking
            w.data -= w.grad.data * learning_rate
            b.data -= b.grad.data * learning_rate
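The hand-trained parameters can be inspected and plotted the same way (a sketch, assuming the loop above converged):
print(w.item(), b.item())
plt.scatter(data.Education, data.Income)
plt.plot(X.numpy(), (torch.matmul(X, w) + b).detach().numpy(), c='r')  # fitted line from the manual model
plt.show()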