【torch tensors】

Reference: https://blog.csdn.net/xholes/article/details/81667211
Tensor data types (9 kinds):

torch.float32(torch.float)
torch.float64(torch.double)
torch.float16(torch.half)
torch.uint8
torch.int8
torch.int16(torch.short)
torch.int32(torch.int)
torch.int64(torch.long)
torch.bool
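
A tensor's dtype can be chosen at creation time or converted afterwards. A minimal sketch (the variable names are just for illustration):

import torch as t
x = t.tensor([1, 2, 3], dtype=t.float16)  # set the dtype explicitly at creation
print(x.dtype)                            # torch.float16
y = x.to(t.int64)                         # convert with .to()
z = x.double()                            # shorthand methods: .float(), .double(), .long(), ...
print(y.dtype, z.dtype)                   # torch.int64 torch.float64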

1. Creating tensors

From other data
1) torch.Tensor is a class (the result uses the default float dtype); nowadays t.FloatTensor() is the more explicit way to construct a single-precision float tensor.
2) torch.tensor() is just a Python function; it infers the dtype from the original data and produces the corresponding torch.LongTensor, torch.FloatTensor or torch.DoubleTensor, without changing the original data type. Reference: http://t.csdn.cn/jmW1k

import torch as t
import numpy as np
a1 = t.Tensor([[1,2,3],[4,5,6]]) # Tensor is a class; the result uses the default float dtype (t.FloatTensor() is the explicit single-precision constructor)
x = np.array([[1,2,3],[4,5,6]])
a2 = t.tensor(x) # convert to a tensor; tensor() is a function, copies the data and keeps x's dtype
#a2 = t.Tensor(x)
print(a1)
print(a2)
print(t.FloatTensor([[1,2,3],[4,5,6]]))

a3 = t.from_numpy(x) # shares memory with x
x[0,0] = -1
print(a2)
print(a3)

Output:

tensor([[1., 2., 3.],
        [4., 5., 6.]])
tensor([[1, 2, 3],
        [4, 5, 6]], dtype=torch.int32)
tensor([[1., 2., 3.],
        [4., 5., 6.]])
tensor([[1, 2, 3],
        [4, 5, 6]], dtype=torch.int32)
tensor([[-1,  2,  3],
        [ 4,  5,  6]], dtype=torch.int32)

2. Creating tensors

From torch's own factory functions

print(t.Tensor(2,3)) # uninitialized memory, similar to numpy.empty
print(t.arange(5))
print(t.linspace(0,1,10))

print(t.zeros(3,2,dtype=t.long))
print(t.ones(3,3))
print(t.eye(3,3))
print(t.zeros_like(a1))

print(t.rand(2,3)) # uniform random numbers in [0,1)
print(t.randn(2,3)) # standard normal distribution
print(t.randint(0,10,(2,3))) # random integers in [0,10)

Output:

tensor([[0., 0., 0.],
        [0., 0., 0.]])
tensor([0, 1, 2, 3, 4])
tensor([0.0000, 0.1111, 0.2222, 0.3333, 0.4444, 0.5556, 0.6667, 0.7778, 0.8889,
        1.0000])
tensor([[0, 0],
        [0, 0],
        [0, 0]])
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
tensor([[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]])
tensor([[0., 0., 0.],
        [0., 0., 0.]])
tensor([[0.8671, 0.1191, 0.5539],
        [0.9079, 0.1528, 0.8069]])
tensor([[-0.8538, -0.3312,  2.0830],
        [-0.5132,  1.3465, -0.1405]])
tensor([[9, 7, 5],
        [1, 7, 2]])
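
A few more factory functions in the same family, as a short sketch (a1 is the tensor created above; random values differ per run):

print(t.full((2,3), 7.0))   # tensor filled with a constant
print(t.randperm(5))        # random permutation of 0..4
print(t.rand_like(a1))      # same shape and dtype as a1, uniform in [0,1)
print(t.arange(0, 1, 0.25)) # arange also accepts a float step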

3. Attributes

print(a1.dtype,a1.device,a1.requires_grad)
print(a1.size(), a1.shape, t.numel(a1))

Output:

torch.float32 cpu False
torch.Size([2, 3]) torch.Size([2, 3]) 6
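
requires_grad defaults to False; it can be requested at creation time (for floating-point tensors) together with dtype and device. A small sketch:

g = t.ones(2, 2, dtype=t.float64, requires_grad=True)
print(g.dtype, g.device, g.requires_grad)   # torch.float64 cpu True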

4. Inspecting dimensions

b1 = t.randint(0,10,(2,3,4))
print(b1)
print(b1.size(), b1.shape)
print(b1.size(2),b1.shape[2]) # size of the third dimension of b1
print(t.numel(b1), b1.ndimension())

Output:

tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]])
torch.Size([2, 3, 4]) torch.Size([2, 3, 4])
4 4
24 3

5. Reshaping

b2 = b1.view(6,-1)
print(b1, b1.size())
print(b2, b2.size())

b3 = b1.unsqueeze(0)
print(b3, b3.size())

b4 = b3.squeeze() # new tensor with all size-1 dimensions of b3 removed
print(b4, b4.size())

b5 = b1.flatten()
print(b5, b5.size())

Output:

tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]]) torch.Size([2, 3, 4])
tensor([[6, 5, 9, 4],
        [4, 3, 6, 3],
        [7, 2, 7, 4],
        [9, 4, 5, 6],
        [6, 8, 5, 9],
        [1, 8, 2, 7]]) torch.Size([6, 4])
tensor([[[[6, 5, 9, 4],
          [4, 3, 6, 3],
          [7, 2, 7, 4]],

         [[9, 4, 5, 6],
          [6, 8, 5, 9],
          [1, 8, 2, 7]]]]) torch.Size([1, 2, 3, 4])
tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]]) torch.Size([2, 3, 4])
tensor([6, 5, 9, 4, 4, 3, 6, 3, 7, 2, 7, 4, 9, 4, 5, 6, 6, 8, 5, 9, 1, 8, 2, 7]) torch.Size([24])
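
reshape() is a close relative of view(): it returns the same result when the memory layout allows it and silently copies otherwise, so it also works on non-contiguous tensors. A short sketch using b1 and b2 from above:

b2r = b1.reshape(6, -1)        # same result as view here
print((b2 == b2r).all())       # tensor(True)
print(b1.view(-1, 12).size())  # -1 lets torch infer the remaining dimension: torch.Size([2, 12])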

6. Transpose

print('a1= ', a1,a1.size())
print('shape of a1 transposed: ', a1.t().size()) # t() only transposes 2-D tensors

c1 = b1.transpose(0,2)  # swap dimensions 0 and 2
print('b1=',b1,b1.size())
print('c1=',c1, c1.size())
c2 = b1.transpose(0,2).contiguous() 
print('c2=', c2, c2.size())
# view() never moves data in memory and only works on contiguous tensors. transpose() and t() do not move data either: they return a view with swapped strides, so the logical order no longer matches the memory order and the result is non-contiguous.
# contiguous() then copies the transposed tensor into a new, contiguous block of memory holding the same data.
print(b1.data_ptr(), c1.data_ptr(), c2.data_ptr()) # b1 and c1 share the same storage; c2 is a fresh copy

Output:

a1=  tensor([[1., 2., 3.],
        [4., 5., 6.]]) torch.Size([2, 3])
shape of a1 transposed:  torch.Size([3, 2])
b1= tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]]) torch.Size([2, 3, 4])
c1= tensor([[[6, 9],
         [4, 6],
         [7, 1]],

        [[5, 4],
         [3, 8],
         [2, 8]],

        [[9, 5],
         [6, 5],
         [7, 2]],

        [[4, 6],
         [3, 9],
         [4, 7]]]) torch.Size([4, 3, 2])
c2= tensor([[[6, 9],
         [4, 6],
         [7, 1]],

        [[5, 4],
         [3, 8],
         [2, 8]],

        [[9, 5],
         [6, 5],
         [7, 2]],

        [[4, 6],
         [3, 9],
         [4, 7]]]) torch.Size([4, 3, 2])
1823826886464 1823826886464 1823826882624
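
The contiguity point above can be checked directly: the transposed view shares storage but is no longer contiguous, so view() on it fails, while contiguous() (or reshape()) produces a usable copy. A small sketch with the tensors from above:

print(b1.is_contiguous(), c1.is_contiguous(), c2.is_contiguous())  # True False True
try:
    c1.view(-1)                 # view() needs contiguous memory
except RuntimeError as err:
    print('view failed:', err)
print(c1.reshape(-1).size())    # reshape() copies when needed: torch.Size([24])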

7. Concatenation

print("b1 = ", b1, b1.size())
d1 = t.stack([b1,b1],0) # stack along a new dim 0: inserts a dimension, (2,3,4) -> (2,2,3,4)
print("d1= ", d1, d1.size())

d2 = t.cat([b1,b1], 0) # concatenate along existing dim 0: no new dimension, but dim 0 doubles, (2,3,4) -> (4,3,4)
print("d2 = ", d2, d2.size())

Output:

b1 =  tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]]) torch.Size([2, 3, 4])
d1=  tensor([[[[6, 5, 9, 4],
          [4, 3, 6, 3],
          [7, 2, 7, 4]],

         [[9, 4, 5, 6],
          [6, 8, 5, 9],
          [1, 8, 2, 7]]],


        [[[6, 5, 9, 4],
          [4, 3, 6, 3],
          [7, 2, 7, 4]],

         [[9, 4, 5, 6],
          [6, 8, 5, 9],
          [1, 8, 2, 7]]]]) torch.Size([2, 2, 3, 4])
d2 =  tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]],

        [[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]]) torch.Size([4, 3, 4])
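
stack() inserts a new dimension while cat() joins along an existing one, so stacking along dim 0 is equivalent to concatenating the inputs after unsqueeze(0). A quick sketch with b1 and d1 from above:

d3 = t.cat([b1.unsqueeze(0), b1.unsqueeze(0)], 0)
print((d1 == d3).all(), d3.size())  # tensor(True) torch.Size([2, 2, 3, 4])
print(t.cat([b1, b1], 1).size())    # concatenate along dim 1 instead: torch.Size([2, 6, 4])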

8. Functions

e = t.Tensor([[2,5,3], [6,4,1]])
print('e= ',e)
print('2-norm of e along dim 0 (per column)= ', e.norm(2, dim=0)) # the norm of each column
print('element-wise sqrt of e= ', e.sqrt())
print('sum of e= ', e.sum(), 'mean of e= ', e.mean())
print('max of e= ', e.max(), 'max of e along dim 0= ', e.max(0), 'argmax of e along dim 0= ', e.argmax(0))
print('e sorted along dim 1 (within each row)= ', e.sort(1))
print('top 2 of e along dim 1, descending= ', e.topk(2,dim=1,largest=True)) # top-k within each row; largest=True sorts from large to small

Output:

e=  tensor([[2., 5., 3.],
        [6., 4., 1.]])
2-norm of e along dim 0 (per column)=  tensor([6.3246, 6.4031, 3.1623])
element-wise sqrt of e=  tensor([[1.4142, 2.2361, 1.7321],
        [2.4495, 2.0000, 1.0000]])
sum of e=  tensor(21.) mean of e=  tensor(3.5000)
max of e=  tensor(6.) max of e along dim 0=  torch.return_types.max(
values=tensor([6., 5., 3.]),
indices=tensor([1, 0, 0])) argmax of e along dim 0=  tensor([1, 0, 0])
e sorted along dim 1 (within each row)=  torch.return_types.sort(
values=tensor([[2., 3., 5.],
        [1., 4., 6.]]),
indices=tensor([[0, 2, 1],
        [2, 1, 0]]))
top 2 of e along dim 1, descending=  torch.return_types.topk(
values=tensor([[5., 3.],
        [6., 4.]]),
indices=tensor([[1, 2],
        [0, 1]]))
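
sum(), mean() and the other reductions also take a dim argument; keepdim=True keeps the reduced dimension with size 1, which is convenient for later broadcasting. A small sketch with e from above:

print(e.sum(dim=0))                    # column sums: tensor([8., 9., 4.])
print(e.mean(dim=1, keepdim=True))     # row means, shape (2, 1)
print(e / e.sum(dim=1, keepdim=True))  # normalize each row by its own sum (broadcasts (2,3)/(2,1))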

9. Operations

Element-wise operations

print('a1+a2= ', a1+a2)
f1 = a1.clone() # copy (new storage)
print(a1.data_ptr(), f1.data_ptr()) # different storage locations
f1.add_(a2) # in-place addition
print('f1 = ',f1)
print('a1*a2 = ', a1*a2) # element-wise multiplication

Output:

a1+a2=  tensor([[ 2.,  4.,  6.],
        [ 8., 10., 12.]])
1823889896256 1823889897728
f1 =  tensor([[ 2.,  4.,  6.],
        [ 8., 10., 12.]])
a1*a2 =  tensor([[ 1.,  4.,  9.],
        [16., 25., 36.]])
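
Methods whose names end with an underscore (add_, mul_, zero_, ...) modify the tensor in place and keep its storage, while the plain versions return a new tensor. A short sketch with a1 from above:

f0 = a1.clone()
ptr = f0.data_ptr()
f0.mul_(2)                   # in place: same storage
print(f0.data_ptr() == ptr)  # True
f0 = f0 + 1                  # out of place: a new tensor is allocated
print(f0.data_ptr() == ptr)  # False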

10. Multiplication

1. Vector and matrix products
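
The original code for this step is not shown; judging from the names, shapes and values printed below, it creates a random vector f2, a matrix f3 and a vector f4 and multiplies them with matmul. A plausible reconstruction (the random values will not match the output exactly):

f2 = t.randn(3)
f3 = t.randn(3,4)
f4 = t.randn(4)
print('f2=', f2, f2.shape)
print('f3=', f3, f3.shape)
print('f4=', f4, f4.shape)
print(f2.matmul(f3), '\n', f2.matmul(f3).shape, '\n',  # (3) x (3,4) -> (4)
      f3.matmul(f4), '\n', f3.matmul(f4).shape)        # (3,4) x (4) -> (3)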

Output:

f2= tensor([0.6125, 0.3544, 1.3473]) torch.Size([3])
f3= tensor([[ 0.2470, -0.0424, -0.8120, -0.1527],
        [-0.3853, -0.6378, -0.7061, -0.5613],
        [-0.6067, -1.7834, -1.0439, -0.5790]]) torch.Size([3, 4])
f4= tensor([ 0.5779, -0.2921, -0.6215, -0.7917]) torch.Size([4])
tensor([-0.8026, -2.6548, -2.1541, -1.0725]) 
 torch.Size([4]) 
 tensor([0.7806, 0.8469, 1.2774]) 
 torch.Size([3])

2. Matrix multiplication

f5 = t.randn(4,3)
print('f5=', f5, f5.shape)
f6 = f3.matmul(f5)  # (3*4)*(4*3)=(3*3)
f7 = f3.mm(f5)  # (3*4)*(4*3)=(3*3)
f8 = f3 @ f5    # (3*4)*(4*3)=(3*3); the three matrix multiplications above are equivalent
print(f6.shape, f7.shape, f8.size())
print((f6==f7).all(), (f6==f8).all())

Output:

f5= tensor([[-0.1658, -2.0029, -0.6491],
        [-1.1083,  0.0795, -0.3132],
        [-0.4568,  0.0212,  0.3957],
        [-1.7799,  0.4910, -0.0948]]) torch.Size([4, 3])
torch.Size([3, 3]) torch.Size([3, 3]) torch.Size([3, 3])
tensor(True) tensor(True)
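
mm() only accepts 2-D matrices, whereas matmul() (and the @ operator) also handles 1-D vectors and broadcasts batch dimensions. A small sketch using f5 from above (v is just an illustrative vector):

v = t.randn(3)
print(f5.matmul(v).size())   # (4,3) x (3,) -> torch.Size([4])
try:
    f5.mm(v)                 # mm() requires both arguments to be 2-D
except RuntimeError as err:
    print('mm failed:', err)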

3. Batch matrix multiplication

f9 = t.randn(10,3,4)
f10 = t.randn(10,4,3)
f11 = f9.bmm(f10) # batch matrix multiplication
f12 = f9 @ f10
f13 = t.einsum('bij,bjk->bik', f9, f10) # Einstein summation convention
# the equation string is split by ->: the left side names the axes of each input (comma-separated), the right side names the axes of the output
print(f11.size(),f12.size(),f13.size())
print((f11==f12).all(),(f11==f13).all())

Output:

torch.Size([10, 3, 3]) torch.Size([10, 3, 3]) torch.Size([10, 3, 3])
tensor(True) tensor(True)
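
The same Einstein-notation strings cover ordinary (non-batched) products and reductions too; a couple of extra examples, reusing f5 from above:

print(t.einsum('ij,jk->ik', f5, f5.t()).size())  # plain matrix product: torch.Size([4, 4])
print(t.einsum('ij->j', f5))                     # sum over i, i.e. column sums, shape (3,)
print(t.einsum('ij->ji', f5).size())             # transpose: torch.Size([3, 4])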

11. Broadcasting

print('b1= ',b1,b1.shape) # [2, 3, 4]
b6 = t.randint(0,10,(2,4))
print('b6= ',b6,b6.shape)
b7 = b1 + b6.view(2,1,4) # after view(2,1,4), each 1x4 row of b6 is broadcast across the corresponding 3x4 slice of b1
print('b7= ',b7,b7.shape)

Output:

b1=  tensor([[[6, 5, 9, 4],
         [4, 3, 6, 3],
         [7, 2, 7, 4]],

        [[9, 4, 5, 6],
         [6, 8, 5, 9],
         [1, 8, 2, 7]]]) torch.Size([2, 3, 4])
b6=  tensor([[3, 4, 7, 8],
        [3, 0, 5, 6]]) torch.Size([2, 4])
b7=  tensor([[[ 9,  9, 16, 12],
         [ 7,  7, 13, 11],
         [10,  6, 14, 12]],

        [[12,  4, 10, 12],
         [ 9,  8, 10, 15],
         [ 4,  8,  7, 13]]]) torch.Size([2, 3, 4])
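
The same broadcast can be written with unsqueeze() or None indexing instead of view(); broadcasting aligns shapes from the right and expands size-1 dimensions. A small sketch with b1, b6 and b7 from above:

b8 = b1 + b6.unsqueeze(1)                  # (2,3,4) + (2,1,4)
b9 = b1 + b6[:, None, :]                   # None inserts a size-1 dimension
print((b7 == b8).all(), (b7 == b9).all())  # tensor(True) tensor(True)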

12. Other

h1 = a1.numpy() # convert to a numpy array
print(type(h1))
h2 = t.rand(1)
print(h2, h2.item()) # .item() extracts the Python scalar value

Output:

<class 'numpy.ndarray'>
tensor([0.4144]) 0.41435736417770386
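
Like from_numpy(), calling .numpy() on a CPU tensor returns an array that shares memory with the tensor, so changes propagate in both directions. A small sketch (cloning first so a1 itself is untouched; h0 and h3 are illustrative names):

h0 = a1.clone()
h3 = h0.numpy()      # shares memory with h0 (CPU tensors only)
h3[0, 0] = 100
print(h0[0, 0])      # tensor(100.)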

GPU usage:

device = t.device("cuda" if t.cuda.is_available() else "cpu")
print(device)
g1 = a1.to(device)
print(g1.get_device())
g2 = g1.cuda()
g3 = a1.clone().cuda(0)
print(g2.get_device(), g3.get_device())

Output:

cuda
0
0 0
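
Tensors can also be created directly on the chosen device, and moved back to host memory with .cpu(). A sketch assuming a CUDA device is available (device and g2 are defined above):

g4 = t.zeros(2, 3, device=device)   # create directly on the device
print(g4.device)                    # cuda:0
g5 = g2.cpu()                       # copy back to host memory
print(g5.device)                    # cpu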