Tensor Concatenation
torch.cat concatenates a list of tensors along an existing dimension dim.
t = torch.ones(2,3)
tt = torch.zeros(2,3)
t1 = torch.cat(tensors = [t,tt],dim = 0)
t2 = torch.cat([t,tt],1)
print("t0:{} shape:{} \n t1:{} shape:{}".format(t1,t1.shape,t2,t2.shape))
torch.stack concatenates tensors along a newly created dimension dim.
t = torch.ones(2,3)
tt = torch.zeros(2,3)
t1 = torch.stack([t,tt],0)
t2 = torch.stack([t,tt],1)
print("t1:{} shape:{} \n t2:{} shape:{}".format(t1, t1.shape, t2, t2.shape))
Tensor Splitting
torch.chunk splits a tensor into a given number of chunks along dimension dim; if the size is not evenly divisible, the last chunk is smaller.
t = torch.ones(2,7)
list_of_tensors = torch.chunk(t, chunks=3, dim=1)
# args: input - the tensor to split; chunks - the number of chunks; dim - the dimension to split along
for idx, t_chunk in enumerate(list_of_tensors):
    print("chunk {}: {}, shape is {}".format(idx + 1, t_chunk, t_chunk.shape))
torch.split splits a tensor along dimension dim into chunks of the given length(s).
t = torch.ones((2, 5))
list_of_tensors = torch.split(t, [2, 1, 2], dim=1)  # the lengths must sum to 5
# args: the first is the tensor to split; the second is the chunk length when an int,
# or the per-chunk lengths when a list; the third is the dimension to split along
for idx, t_split in enumerate(list_of_tensors):
    print("split {}: {}, shape is {}".format(idx + 1, t_split, t_split.shape))
Tensor Indexing
torch.index_select selects entries along dimension dim according to index.
t = torch.randint(0, 9, size=(3, 3))
idx = torch.tensor([0, 2], dtype=torch.long)  # index must be torch.long, not float
t_select = torch.index_select(t, dim=0, index=idx)
# args: dim - the dimension to index along; index - the indices of the entries to select
print("t:\n{}\nt_select:\n{}".format(t, t_select))
torch.masked_select picks the elements where mask is True and returns a 1-D tensor.
t = torch.randint(0, 9, size=(3, 3))
mask = t.le(5)
# le marks positions <= 5 as True; similarly gt marks greater-than and lt marks less-than positions
t_select = torch.masked_select(t, mask)
# args: input - the tensor to index; mask - a boolean tensor with the same shape as input
print("t:\n{}\nmask:\n{}\nt_select:\n{} ".format(t, mask, t_select))
Tensor Reshaping
torch.reshape changes the shape of a tensor.
t = torch.randperm(8)
t_reshape = torch.reshape(t, shape=(-1, 2, 2))
# shape is the target shape; a -1 dimension is inferred from the remaining dimensions
print("t:{}\nt_reshape:\n{}".format(t, t_reshape))
# when the input is contiguous in memory, the new tensor shares its data memory
t[0] = 1024
print("t:{}\nt_reshape:\n{}".format(t, t_reshape))
print("t.data 内存地址:{}".format(id(t.data)))
print("t_reshape.data 内存地址:{}".format(id(t_reshape.data)))
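Note that comparing id(t.data) can be misleading, since .data returns a fresh tensor object on each access; comparing the underlying data pointers is a more direct check:
print(t.data_ptr() == t_reshape.data_ptr())  # True: both views share the same storage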
torch.transpose swaps two dimensions of a tensor. It is commonly used in image preprocessing, e.g. converting channel*w*h data to w*h*channel.
torch.t transposes a 2-D tensor; it is equivalent to torch.transpose(input, 0, 1).
t = torch.rand(2,3,4)
t1 = torch.transpose(t,dim0=0,dim1=2)
# args: input - the tensor; dim0, dim1 - the two dimensions to swap
print("t shape:{} \n t1 shape:{}".format(t.shape,t1.shape))
torch.squeeze removes dimensions of length 1.
- If dim is None, all length-1 dimensions are removed; if dim is given, that dimension is removed only if its length is 1.
t = torch.rand((1, 2, 3, 1))
t_sq = torch.squeeze(t)
t_0 = torch.squeeze(t, dim=0)
t_1 = torch.squeeze(t, dim=1)
print(t.shape)
print(t_sq.shape)
print(t_0.shape)
print(t_1.shape)
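For reference, with t of shape (1, 2, 3, 1) the four prints above should show:
# torch.Size([1, 2, 3, 1])  original
# torch.Size([2, 3])        all length-1 dims removed
# torch.Size([2, 3, 1])     dim 0 removed (its length is 1)
# torch.Size([1, 2, 3, 1])  dim 1 untouched (its length is 2, not 1)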
torch.unsqueeze inserts a dimension of length 1 at position dim.
t = torch.rand((2, 2))
t_sq = torch.unsqueeze(t,dim=2)
print(t_sq.shape)  # output is torch.Size([2, 2, 1])
Tensor Math Operations
torch.add(input, other, alpha=1) computes input + alpha * other.
t_0 = torch.randn((3, 3))
t_1 = torch.ones_like(t_0)
t_add = torch.add(t_0, t_1, alpha=10)  # t_add = t_0 + 10 * t_1
print("t_0:\n{}\nt_1:\n{}\nt_add_10:\n{}".format(t_0, t_1, t_add))
Implementing Linear Regression
import torch
import matplotlib.pyplot as plt
torch.manual_seed(10)
lr = 0.05  # learning rate
# create training data
x = torch.rand(20, 1) * 10  # x data (tensor), shape=(20, 1)
y = 2*x + (5 + torch.randn(20, 1))  # y data (tensor), shape=(20, 1)
# build the linear regression parameters
w = torch.randn((1), requires_grad=True)
b = torch.zeros((1), requires_grad=True)
for iteration in range(1000):
    # forward pass
    wx = torch.mul(w, x)
    y_pred = torch.add(wx, b)
    # compute MSE loss
    loss = (0.5 * (y - y_pred) ** 2).mean()
    # backward pass
    loss.backward()
    # update the parameters
    b.data.sub_(lr * b.grad)
    w.data.sub_(lr * w.grad)
    # zero the gradients; otherwise they accumulate across iterations
    w.grad.zero_()
    b.grad.zero_()
    # plot every 20 iterations
    if iteration % 20 == 0:
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), y_pred.data.numpy(), 'r-', lw=5)
        plt.text(2, 20, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.xlim(1.5, 10)
        plt.ylim(8, 28)
        plt.title("Iteration: {}\nw: {} b: {}".format(iteration, w.data.numpy(), b.data.numpy()))
        plt.pause(0.5)
        if loss.data.numpy() < 1:
            break
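The manual parameter update and gradient zeroing above is exactly what torch.optim automates; an equivalent sketch of the training loop using SGD (plotting omitted; same x, y, w, b and lr as above):
optimizer = torch.optim.SGD([w, b], lr=lr)
for iteration in range(1000):
    y_pred = torch.add(torch.mul(w, x), b)   # forward pass
    loss = (0.5 * (y - y_pred) ** 2).mean()  # MSE loss
    optimizer.zero_grad()                    # clear accumulated gradients
    loss.backward()                          # back-propagate
    optimizer.step()                         # w -= lr * w.grad; b -= lr * b.grad
    if loss.item() < 1:
        break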
This post is my study notes for the Deepshare (深度之眼) PyTorch course, for personal learning only; if you spot any problems, discussion is welcome!