- 张量(Tensors)类似于NumPy的ndarrays(可参阅NumPy实践部分了解),但张量可以在GPU上进行计算。所以从本质上来说,PyTorch是一个处理张量的库。一个张量是一个数字、向量、矩阵或任何n维数组。
# # 张量的创建 # import torch # import numpy # torch.manual_seed(7) 随机创建7个种子数 # # 直接创建 # a = torch.tensor([[1,2,3],[4,5,6]]) # print(a) # import numpy # import torch # a = numpy.array([1,2,3]) # t = torch.from_numpy(a) # print(a) # 依据数值创建 #torch.zeros(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) # import torch # # b = torch.zeros(2,3) # # print(b) # # # # #torch.zeros_like(input, dtype=None, layout=None, device=None, requires_grad=False) # # # 布局相同 # # imput = torch.empty(1,3) # # c = torch.zeros_like(imput) # # print(c) # # # # torch.full((2, 3), 3.141592) # # torch.arange(1, 2.5, 0.5) # # torch.linspace(start=-10, end=10, steps=5) # a = torch.eye(2) # print(a) # 依概率分布创建张量 # 创建正态分布张量 import torch # a = torch.normal(mean=torch.arange(1.0, 11.0),std = torch.arange(1, 0, -0.1)) # print(a) # # std = torch.arange(1, 0, -0.1) # print(std) # mean=torch.arange(1.0, 11.0) # print(mean) # # a = torch.normal(2, 3, size=(1, 4)) # print(a) # a = torch.randn(2,3) #标准正太分布 # print(a) # # a =torch.randint(3, 10, (2, 2)) #整数均匀分布 # print(a) # a =torch.randperm(4)#生成从0到n-1的随机排列 # print(a) # 以input为概率,生成伯努利分布(0-1分布,两点分布) # input = torch.empty(3,3).uniform_(0,1) # a = torch.bernoulli(input) # print(a) # 张量的操作 # 1.张量拼接与切分 # x = torch.randn(2,3) # a = torch.cat((x,x,x),1) # print(a) # a = torch.zeros (1,1) # b = torch.ones (1,1) # c = torch.stack([a, b], dim=2) # print (c) # # input = torch.randn(2, 3) # c = torch.chunk(input,1, dim=0) # print (c) # a = torch.arange(10).reshape(5,2) # b = torch.split(a,2) # print(b) # 张量索引 # x = torch.randn(3,4) # indices = torch.tensor([0 ,2]) #索引数据序号 # a = torch.index_select(x, 0,indices) # print(a) # x = torch.randn(3,4) #将正太分布中大于等于0.5的张良跳出来 # mask = x.ge(0.5) # b = torch.masked_select(x,mask) # print(b) # 张量变换 # a = torch.arange(4.) 
# ---- Scratch notes: tensor transforms (commented-out demo snippets) ----
# b = torch.reshape(a, (2, 2))       # reshape a flat tensor into 2x2
# x = torch.randn(3, 4)
# c = torch.transpose(x, 0, 1)       # swap dims 0 and 1: (3, 4) -> (4, 3)
# b = torch.t(a)                     # transpose, 2-D shorthand for the above
# x = torch.zeros(2, 1, 2, 1, 2)
# y = torch.squeeze(x)               # drop every size-1 dimension
# x = torch.tensor([1, 2, 3, 4])
# b = torch.unsqueeze(x, 0)          # insert a size-1 dimension at dim 0

# ---- Linear regression fitted with hand-written gradient descent ----

# Reproducible toy data: y = 2*x + noise, with x ~ U(0, 10) and
# noise = 5 + U(0, 1), i.e. true slope 2 and true intercept ~5.5.
torch.manual_seed(7)
x = torch.rand(20, 1) * 10
y = 2 * x + (5 + torch.rand(20, 1))

# Trainable parameters: slope w (random init) and intercept b (zero init).
w = torch.rand(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)

# BUGFIX: the original lr = 0.1 diverges. With x ~ U(0, 10) the loss
# curvature w.r.t. w is mean(x**2) ~ 33, so gradient descent is only
# stable for lr < 2/33 ~ 0.06; at 0.1 the iterates blow up to inf/nan.
lr = 0.05

for iteration in range(100):
    # Forward pass: y_pred = w * x + b
    wx = torch.mul(w, x)
    y_pred = torch.add(wx, b)

    # Half mean-squared-error loss.
    loss = (0.5 * (y - y_pred) ** 2).mean()

    # Backward pass populates w.grad and b.grad.
    loss.backward()

    # Parameter update. torch.no_grad() is the supported replacement for
    # mutating .data: it performs the in-place step without autograd
    # recording it.
    with torch.no_grad():
        b -= lr * b.grad
        w -= lr * w.grad

    # Zero the gradients so the next backward() does not accumulate.
    w.grad.zero_()
    b.grad.zero_()

print(w.data, b.data)