PyTorch Tensors

from __future__ import print_function
import torch
import numpy as np
torch.__version__
'1.3.1'
x = torch.empty(5,3) # a 5x3 tensor of uninitialized memory
print(x)
tensor([[9.1834e-41, 0.0000e+00, 0.0000e+00],
        [0.0000e+00, 0.0000e+00, 0.0000e+00],
        [0.0000e+00, 0.0000e+00, 0.0000e+00],
        [0.0000e+00, 1.0762e-42, 0.0000e+00],
        [0.0000e+00, 5.6718e-11, 0.0000e+00]])
x = torch.rand(5,3) # a 5x3 tensor of uniform random values in [0, 1)
print(x)
# Check the tensor size with the shape attribute or the size() method
print(x.shape)
print(x.size())
tensor([[0.2526, 0.7656, 0.2396],
        [0.5391, 0.4434, 0.2136],
        [0.3745, 0.6352, 0.6987],
        [0.6211, 0.2825, 0.0946],
        [0.9340, 0.6565, 0.6271]])
torch.Size([5, 3])
torch.Size([5, 3])

Up to isomorphism, a rank-0 tensor (r = 0) is a scalar, a rank-1 tensor (r = 1) is a vector, a rank-2 tensor (r = 2) is a matrix, and tensors of rank 3 and above are collectively called multi-dimensional tensors.
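A minimal sketch of these ranks in PyTorch (the variable names below are only illustrative); .dim() returns the rank of a tensor:

s = torch.tensor(3.14)        # rank 0: scalar
v = torch.tensor([1.0, 2.0])  # rank 1: vector
m = torch.ones(2, 3)          # rank 2: matrix
t = torch.zeros(2, 3, 4)      # rank 3: multi-dimensional tensor
print(s.dim(), v.dim(), m.dim(), t.dim())
0 1 2 3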

x = torch.zeros(5, 3, dtype=torch.long)
print(x)
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
x = torch.tensor([[5.5,3],[5,3]]) # construct a tensor directly from data
print(x)
tensor([[5.5000, 3.0000],
        [5.0000, 3.0000]])
x = x.new_ones(5,3,dtype=torch.double) # new_* methods create a new tensor based on an existing one
print(x)

x = torch.randn_like(x, dtype=torch.float) # override the dtype
print(x)
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
tensor([[ 1.1492, -0.3027, -0.3889],
        [-0.0061, -0.8942,  2.5887],
        [-1.8800, -1.1619, -0.6853],
        [-0.2841,  0.1407,  0.0970],
        [ 0.0358, -0.4802,  0.7781]])
print(x.size())
torch.Size([5, 3])
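Besides the new_* methods, PyTorch also provides *_like factory functions (e.g. torch.zeros_like, torch.ones_like) that reuse the shape of an existing tensor; a small sketch, with illustrative names:

a = torch.zeros_like(x)                    # same 5x3 shape as x, filled with zeros
b = torch.ones_like(x, dtype=torch.int64)  # same shape, with the dtype overridden
print(a.size(), b.dtype)
torch.Size([5, 3]) torch.int64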

Basic Operations

# Addition
y = torch.rand(5,3)
print(x)
print(y)
print(x+y)
tensor([[ 1.1492, -0.3027, -0.3889],
        [-0.0061, -0.8942,  2.5887],
        [-1.8800, -1.1619, -0.6853],
        [-0.2841,  0.1407,  0.0970],
        [ 0.0358, -0.4802,  0.7781]])
tensor([[0.2793, 0.7743, 0.4154],
        [0.5497, 0.4504, 0.7533],
        [0.1075, 0.5314, 0.1604],
        [0.6892, 0.6163, 0.0620],
        [0.7559, 0.3567, 0.2655]])
tensor([[ 1.4285,  0.4716,  0.0266],
        [ 0.5437, -0.4439,  3.3420],
        [-1.7725, -0.6305, -0.5249],
        [ 0.4050,  0.7570,  0.1590],
        [ 0.7917, -0.1235,  1.0436]])
print(torch.add(x,y))
tensor([[ 1.4285,  0.4716,  0.0266],
        [ 0.5437, -0.4439,  3.3420],
        [-1.7725, -0.6305, -0.5249],
        [ 0.4050,  0.7570,  0.1590],
        [ 0.7917, -0.1235,  1.0436]])
result = torch.empty(5,3)
torch.add(x, y, out=result) # write the result into a pre-allocated output tensor
print(result)
tensor([[ 1.4285,  0.4716,  0.0266],
        [ 0.5437, -0.4439,  3.3420],
        [-1.7725, -0.6305, -0.5249],
        [ 0.4050,  0.7570,  0.1590],
        [ 0.7917, -0.1235,  1.0436]])
y.add_(x)
print(y) # Operations ending in "_" modify the variable in place, e.g. x.copy_(y), x.t_(); see the short sketch after the output below
tensor([[ 1.4285,  0.4716,  0.0266],
        [ 0.5437, -0.4439,  3.3420],
        [-1.7725, -0.6305, -0.5249],
        [ 0.4050,  0.7570,  0.1590],
        [ 0.7917, -0.1235,  1.0436]])
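A small sketch of the two in-place operations mentioned in the comment above (a and b are illustrative names):

a = torch.zeros(2, 2)
b = torch.rand(2, 2)
a.copy_(b)  # copy the contents of b into a, in place
a.t_()      # transpose a in place
print(torch.equal(a, b.t()))
True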
# Indexing
x = torch.rand(5,3)
print(x)
print(x[:, 1]) # index the second column

# Maximum of each row (reduce along dim=1)
max_value, max_idx = torch.max(x, dim=1)
print(max_value, max_idx)

# Sum of each row
sum_x = torch.sum(x, dim=1)
print(sum_x)
tensor([[0.9938, 0.4877, 0.3327],
        [0.2078, 0.0133, 0.7218],
        [0.9322, 0.7934, 0.8954],
        [0.3437, 0.4242, 0.2898],
        [0.8473, 0.1036, 0.8701]])
tensor([0.4877, 0.0133, 0.7934, 0.4242, 0.1036])
tensor([0.9938, 0.7218, 0.9322, 0.4242, 0.8701]) tensor([0, 2, 0, 1, 2])
tensor([1.8143, 0.9429, 2.6210, 1.0577, 1.8209])
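For comparison, dim=0 reduces over the rows, giving one value per column; a minimal sketch using the same x:

col_max, col_idx = torch.max(x, dim=0)  # maximum of each column and its row index
col_sum = torch.sum(x, dim=0)           # sum of each column
print(col_max.size(), col_sum.size())
torch.Size([3]) torch.Size([3])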
# torch.view changes a tensor's dimensions and size, similar to NumPy's reshape
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # a size of -1 is inferred from the other dimensions
print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
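Note that view() returns a tensor that shares storage with the original, so modifying the view also modifies the source tensor; a minimal sketch:

w = x.view(-1)  # flatten the 4x4 tensor into 16 elements
w[0] = 100.0    # because storage is shared, this also changes x[0, 0]
print(x[0, 0])
tensor(100.)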
# For a tensor with only one element (or a scalar), use
# .item() to extract the value as a plain Python object
x = torch.randn(1) # single-element tensor
print(x)
print(x.item()) 
y = torch.tensor(3) # scalar (zero-dimensional) tensor
print(y)
print(y.item())
tensor([0.5168])
0.5167824029922485
tensor(3)
3
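.item() only works for tensors with exactly one element; for larger tensors, .tolist() converts the data to a nested Python list instead (a small sketch):

m = torch.tensor([[1, 2], [3, 4]])
print(m.tolist())  # m.item() would raise a ValueError here
[[1, 2], [3, 4]]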

Basic Data Types

The basic Tensor data types are:
tensor.long()   # 64-bit integer (long)
tensor.int()    # 32-bit integer
tensor.short()  # 16-bit integer (short)
tensor.float()  # 32-bit floating point (the default)
tensor.double() # 64-bit floating point
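These conversion methods return a new tensor with the requested dtype; a minimal sketch:

t = torch.rand(2, 2) # float32 by default
print(t.dtype)
print(t.long().dtype)
print(t.double().dtype)
torch.float32
torch.int64
torch.float64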

NumPy Conversion

# NumPy conversion
a = torch.ones(5)
print(a)
tensor([1., 1., 1., 1., 1.])
b = a.numpy() # convert to a NumPy array; a and b share the underlying memory
print(b)
[1. 1. 1. 1. 1.]
a.add_(1)
print(a,b)
tensor([2., 2., 2., 2., 2.]) [2. 2. 2. 2. 2.]
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a) # convert the NumPy array to a Tensor with .from_numpy(); memory is shared
np.add(a, 1, out=a)
print(a,b)
[2. 2. 2. 2. 2.] tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
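Both a.numpy() and torch.from_numpy() share memory with the source, as the examples above show. If an independent copy is needed, a minimal sketch using torch.tensor(), which copies the data:

a = np.ones(5)
b = torch.tensor(a) # torch.tensor() copies, unlike torch.from_numpy()
np.add(a, 1, out=a)
print(a, b)
[2. 2. 2. 2. 2.] tensor([1., 1., 1., 1., 1.], dtype=torch.float64)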
# CUDA tensors
# is_available() checks whether CUDA can be used
# torch.device designates the device to move tensors to
if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device) # create the tensor directly on the GPU
    x = x.to(device) # .to("cuda") moves the tensor to the GPU
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))
tensor([4.3497, 5.0759, 2.6086, 2.4112, 3.5776, 5.4971, 4.8984, 1.5604, 3.7739,
        3.1584, 2.7664, 0.9340, 2.6396, 2.8920, 2.3703, 1.5383])
tensor([4.3497, 5.0759, 2.6086, 2.4112, 3.5776, 5.4971, 4.8984, 1.5604, 3.7739,
        3.1584, 2.7664, 0.9340, 2.6396, 2.8920, 2.3703, 1.5383],
       dtype=torch.float64)
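A common pattern for device-agnostic code is to choose the device once and reuse it; a minimal sketch:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
a = torch.randn(3, 3, device=device) # created directly on the chosen device
b = torch.randn(3, 3).to(device)     # or moved afterwards with .to()
print((a + b).device)                # prints cuda:0 on a GPU machine, otherwise cpu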