import numpy as np
import torch
# Create an uninitialized matrix
x = torch.empty(5, 3)
print(x)
# Create a randomly initialized matrix
x = torch.rand(5, 3)
print(x)
# Create an all-zeros matrix with integer dtype
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
# A tensor can also be built directly from data
# x = torch.tensor([5.5, 3])
print(x)
# Show the matrix size
print(x.size())
# Basic arithmetic: operator form and functional form are equivalent
y = torch.rand(5, 3)
print(x + y)
print(torch.add(x, y))
# Indexing: ':' selects every element along that dimension
print(x[:, 1])
# view changes the tensor's shape (a reshape operation)
x = torch.rand(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # -1 lets torch infer the size: 4*4 = 16, so this dim becomes 2
print(x.size(), y.size(), z.size())
# The framework computes back-propagation for us.
import torch

# Tensors that need gradients can be flagged at construction time...
x = torch.rand(3, 4, requires_grad=True)
print(x)
# ...or the flag can be switched on afterwards.
x = torch.randn(3, 4)
x.requires_grad = True
print(x)
# Back-propagation: sum everything, then differentiate
b = torch.randn(3, 4, requires_grad=True)
t = x + b
y = t.sum()
print(y)
y.backward()
print(b.grad)
# Every tensor that participates in the backward pass ends up with requires_grad=True
print(x.requires_grad, b.requires_grad, t.requires_grad)
# Computation flow: z = w * x + b
x = torch.rand(1)
b = torch.rand(1, requires_grad=True)
w = torch.rand(1, requires_grad=True)
y = w * x
z = y + b
print(x.requires_grad, b.requires_grad, w.requires_grad, y.requires_grad)
print(x.is_leaf, w.is_leaf, b.is_leaf, z.is_leaf)
# retain_graph=True keeps the graph alive; gradients accumulate if not zeroed
z.backward(retain_graph=True)
print(w.grad)
print(b.grad)
# Linear regression model
# Build the inputs x and the labels y for the target function y = 2*x + 1
x = list(range(11))
x_train = np.array(x, dtype=np.float32).reshape(-1, 1)
print(x_train.shape)
y = [2 * i + 1 for i in x]
y_train = np.array(y, dtype=np.float32).reshape(-1, 1)
print(y_train.shape)
import torch
import torch.nn as nn  # torch.nn provides the layer types: linear, conv, RNN, ...
#构造线性回归模型
class LinearRegressionModel(nn.Module):
    """A single fully-connected layer mapping input_dim features to output_dim outputs.

    The source had lost all indentation (a paste artifact), which made the
    class body a SyntaxError; the structure is restored here.
    """

    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        # Declare each layer the model uses; here just one linear layer
        # with the given input/output dimensions.
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # Forward pass: apply the linear layer to the input batch.
        out = self.linear(x)
        return out
# Instantiate the model: one input feature, one output value
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)
print(model)
# Hyper-parameters, loss function and optimizer
epochs = 1000
learning_rate = 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
#开始训练模型
# Train the model.
# The source had lost the loop-body indentation (paste artifact); restored here.
# The numpy->tensor conversion is loop-invariant, so do it once up front
# instead of on every epoch.
inputs = torch.from_numpy(x_train)
labels = torch.from_numpy(y_train)
for epoch in range(1, epochs + 1):
    # Gradients accumulate by default, so clear them at each iteration.
    optimizer.zero_grad()
    # Forward pass
    outputs = model(inputs)
    # Compute the loss
    loss = criterion(outputs, labels)
    # Backward pass
    loss.backward()
    # Update the weight parameters
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {},loss {}'.format(epoch, loss.item()))
# Test the model's predictions.
# NOTE(review): the commented-out line below is buggy as written —
# `.requires_grad()` is an attribute, not a method; `.requires_grad_()`
# would be needed if this were re-enabled.
#predicteed = model(torch.from_numpy(x_train).requires_grad()).data.numpy()
# Save and reload the model: only the state_dict (weights), not the whole module.
torch.save(model.state_dict(),'model.pkl')
model.load_state_dict(torch.load('model.pkl'))
# Common tensor forms: scalar (a number), vector, matrix, n-dimensional tensor
from torch import tensor

# scalar: zero-dimensional, a single value
x = tensor(42)
print(x)
print(x.dim())
print(2 * x)
print(x.item())
# vector: one-dimensional
v = tensor([1.5, -0.5, 3.0])
print(v)
print(v.dim())
print(v.size())
# matrix: two-dimensional
M = tensor([[1., 2.], [3., 4.]])
print(M)
print(M.matmul(M))  # matrix product
print(tensor([1., 0.]).matmul(M))
print(M * M)  # element-wise product
# Torch Hub: load someone else's trained model (architecture + weights)
# so the task is solved with essentially one line of code.
# Model zoo: https://pytorch.org/hub/research-models
# GitHub: https://github.com/pytorch/hub
import torch

repo = 'pytorch/vision:v0.4.2'
model = torch.hub.load(repo, 'deeplabv3_resnet101', pretrained=True)
model.eval()
# Linear regression model study notes, 3/27
# (blog footer residue: "latest recommended article published 2024-06-15 15:24:03")