# Tensors are similar to numpy's ndarray, but can additionally be used on a GPU for acceleration.
from __future__ import print_function
import torch
# Create an uninitialized 5x3 matrix (values are whatever happens to be in memory).
x = torch.empty(5,3)
print(x)
"""
tensor([[8.9082e-39, 8.9082e-39, 8.9082e-39],
[1.0194e-38, 9.1837e-39, 8.4490e-39],
[9.6429e-39, 8.4490e-39, 9.6429e-39],
[9.2755e-39, 1.0286e-38, 9.0919e-39],
[8.9082e-39, 9.2755e-39, 8.4490e-39]])
"""
# Create a randomly initialized 5x3 matrix (uniform on [0, 1)).
x = torch.rand(5,3)
print(x)
"""
tensor([[0.6363, 0.4911, 0.4214],
[0.9973, 0.4196, 0.3368],
[0.0134, 0.2469, 0.3792],
[0.4146, 0.5446, 0.4023],
[0.2710, 0.3975, 0.5112]])
"""
# Construct a zero-filled matrix with dtype long (64-bit integer).
x = torch.zeros(5,3,dtype = torch.long)
print(x)
"""
tensor([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
"""
# Construct a tensor directly from data (dtype is inferred: float32 here).
x = torch.tensor([5.5,3])
print(x) # tensor([5.5000, 3.0000])
# Create a new tensor based on an existing one. Unless new values are supplied,
# these methods reuse the input tensor's properties (e.g. dtype).
x = x.new_ones(5,3,dtype = torch.double)
print(x)
"""
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=torch.float64)
"""
# Same shape as x, but new random values and an overridden dtype (float32).
x = torch.randn_like(x,dtype = torch.float)
print(x)
"""
tensor([[ 0.4324, 1.2643, 0.3443],
[ 0.5959, 1.3176, 0.0298],
[-0.3860, -0.1308, -0.9114],
[ 0.8100, -1.3739, 1.6596],
[ 1.2363, 1.9997, 0.2527]])
"""
# Operations
# Form 1: operator syntax
y = torch.rand(5,3)
print(x + y)
"""
tensor([[ 1.4126e+00, 6.8308e-01, 4.7527e-01],
[-1.2185e+00, 1.0213e+00, 2.3724e-01],
[-3.1333e-01, 1.6759e+00, 2.0732e+00],
[ 7.4825e-01, -1.9758e-03, -8.2048e-02],
[ 1.2351e-01, -4.2529e-01, 9.7085e-01]])
"""
# Form 2: function syntax
print(torch.add(x,y))
"""
tensor([[-0.5781, 0.3660, 0.5735],
[ 0.4229, 0.0500, 0.2018],
[ 0.6387, 0.6206, 1.2428],
[ 0.5858, -1.0471, 0.3354],
[ 1.8888, 1.6495, 1.1323]])
"""
# Form 3: provide an output tensor as the `out` argument
result = torch.empty(5,3)
torch.add(x,y,out = result)
print(result)
"""
tensor([[ 0.3352, 0.2303, 0.0473],
[ 0.2402, -0.0209, 0.5980],
[ 0.8451, 0.1239, 1.8851],
[ 0.1244, 2.1331, -0.1232],
[ 1.1081, 0.5348, -1.9197]])
"""
# Form 4: in-place operation
# add x to y (mutates y)
y.add_(x)
print(y)
"""
tensor([[ 1.5857, -1.4062, -0.5115],
[ 0.6804, 1.5849, 0.1429],
[ 1.7859, 1.9659, 1.1634],
[-0.1099, -1.1706, 1.5166],
[-0.0537, -0.0459, 0.0303]])
"""
# Note: any operation that mutates a tensor in-place is suffixed with an underscore, e.g. x.copy_(y)
# Indexing (numpy-style slicing works)
print(x[:,1]) # prints column index 1 (i.e. the SECOND column) of every row
# tensor([-0.3033, 0.0998, 0.1000, 0.1890, -1.4164])
# Reshaping: use torch.view to change the shape
x = torch.randn(4,4)
y = x.view(16)
z = x.view(-1,8) # -1 is inferred automatically from the remaining dimensions
print(x.size(),y.size(),z.size()) # torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
# For a one-element tensor, use item() to get the corresponding Python number.
x = torch.randn(1)
print(x) # tensor([0.8120])
print(x.item()) # 0.8119511008262634
# Converting a torch Tensor to a numpy array.
# The torch tensor and the numpy array share the same underlying memory,
# so mutating one also changes the other.
a = torch.ones(5)
print(a) # tensor([1., 1., 1., 1., 1.])
b = a.numpy()
print(b) # [1. 1. 1. 1. 1.]
a.add_(1) # when a changes in-place, b changes too
print(a) # tensor([2., 2., 2., 2., 2.])
print(b) # [2. 2. 2. 2. 2.]
# Converting a numpy array to a torch tensor (also memory-sharing).
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a,1,out = a)
print(a) # [2. 2. 2. 2. 2.]
print(b) # tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
# NOTE(review): the two lines below were web-page scrape residue (blog title/date),
# not Python — left here commented out so the file parses.
# 张量
# 最新推荐文章于 2022-05-09 19:42:35 发布