PyTorch Minimal Introduction Tutorial (2): Tensors and Data Types

import torch
import numpy as np

x = torch.randn(2, 3)
print("x:\t", x)
# Note the difference between x.size() (a method call) and x.shape (an attribute)
print("x.size():\t", x.size())
print("x.size(0):\t", x.size(0))
print("x.shape:\t", x.shape)

A = torch.zeros(2, 3)
print("A:\t", A)

B = torch.ones((2, 3, 4), dtype=torch.int32) # a 3-dimensional (2x3x4) tensor
print("B:\t", B)

x = torch.tensor([6,2], dtype=torch.float32)
print("x:\t", x)
print("x.type():\t", x.type())

b = x.type(torch.int32)
print("b:\t", b)
print("b.type():\t", b.type())

"""
Converting between tensors and NumPy ndarrays
"""
# np.random.randn samples from the standard normal distribution (mean 0, std 1)
a = np.random.randn(2, 3)
x1 = torch.from_numpy(a)
print(x1.type())
# x1 = x1.type(torch.int32) # would convert the DoubleTensor to an int32 IntTensor
print("x1:\t", x1)
# Convert the tensor back to a NumPy ndarray
x2 = x1.numpy()
print("x2:\t", x2)

x3 = torch.rand((2, 3), dtype=torch.float64) # uniform on [0, 1); float64 so it matches x1's dtype
print("x3:\t", x3)
# x1 + x3
print("x1 + x3:\t", x1+x3)
# x1 + 3
print("x1 + 3:\t", x1+3)
# x1.add(x3)
print("x1.add(x3):\t", x1.add(x3))
# x1.add_(x3): the trailing underscore means in-place; it overwrites x1's own memory
print("x1.add_(x3):\t", x1.add_(x3))
print("x1:\t", x1)

"""
Shape transformations
"""
# view(-1, 1): the second dimension is 1, and -1 tells PyTorch to infer the
# first dimension automatically, i.e. the result has shape (n, 1)
print("x1.view(-1, 1):\t", x1.view(-1, 1))

# Sum and mean
print("x1.mean():\t", x1.mean())
x4 = x1.sum()
print("x4:\t", x4)

# x4 is still a tensor; use item() to get the value back as a Python scalar
print("x4.item():\t", x4.item())

"""
Automatic differentiation for tensors
Set the torch.Tensor attribute .requires_grad to True and PyTorch will start
tracking every operation on that tensor.
After the computation is finished, call .backward() and all gradients are
computed automatically; the gradient for this tensor is accumulated into its
.grad attribute.
"""
x = torch.ones(2, 2, requires_grad=True)
print("y:\t", x)
print("y.requires_grad:\t", x.requires_grad)
# x.grad     x.grad_fn 返回皆为空
print("x.grad:\t", x.grad)
print("x.grad_fn:\t", x.grad_fn)

y = x + 2
print("z:\t", y)
print("y.gard_fn:\t", y.grad_fn)

z = y*y + 3
print("z1:\t", z)
out = z.mean()
print("out:\t", out)
# out = f(x); backward() computes d(out)/dx
out.backward()

# Gradient of out with respect to x
print("x.grad:\t", x.grad)
print("y.gard:\t", y.grad)

# Inspect the raw values of x (x.data is detached from the graph)
print("x.data:\t", x.data)

# Inside torch.no_grad(), newly computed results do not track gradients
with torch.no_grad():
    print((x**2).requires_grad)
    y = x.detach()
    print("y.requires_grad", y.requires_grad)

# Toggling requires_grad in place
a = torch.tensor([2, 3], dtype=torch.float16) # the dtype must be floating point: only floating-point tensors can require gradients
print("a.requires_grad:\t", a.requires_grad)
a.requires_grad_(True)  # in-place: flips the flag on a and returns a itself
print("a.requires_grad_(True)", a.requires_grad_(True))

Output:

x:	 tensor([[-1.2922,  0.6656,  0.4903],
        [ 1.4096, -0.8729,  0.1928]])
x.size():	 torch.Size([2, 3])
x.size(0):	 2
x.shape:	 torch.Size([2, 3])
A:	 tensor([[0., 0., 0.],
        [0., 0., 0.]])
B:	 tensor([[[1, 1, 1, 1],
         [1, 1, 1, 1],
         [1, 1, 1, 1]],

        [[1, 1, 1, 1],
         [1, 1, 1, 1],
         [1, 1, 1, 1]]], dtype=torch.int32)
x:	 tensor([6., 2.])
x.type():	 torch.FloatTensor
b:	 tensor([6, 2], dtype=torch.int32)
b.type():	 torch.IntTensor
torch.DoubleTensor
x1:	 tensor([[-0.6928, -1.8161,  0.3243],
        [ 0.7815,  0.2741,  0.8839]], dtype=torch.float64)
x2:	 [[-0.69283026 -1.81614015  0.32434972]
 [ 0.78146688  0.27406404  0.88388103]]
x3:	 tensor([[0.9575, 0.4844, 0.2129],
        [0.5024, 0.4066, 0.5644]], dtype=torch.float64)
x1 + x3:	 tensor([[ 0.2647, -1.3317,  0.5373],
        [ 1.2838,  0.6807,  1.4483]], dtype=torch.float64)
x1 + 3:	 tensor([[2.3072, 1.1839, 3.3243],
        [3.7815, 3.2741, 3.8839]], dtype=torch.float64)
x1.add(x3):	 tensor([[ 0.2647, -1.3317,  0.5373],
        [ 1.2838,  0.6807,  1.4483]], dtype=torch.float64)
x1.add_(x3):	 tensor([[ 0.2647, -1.3317,  0.5373],
        [ 1.2838,  0.6807,  1.4483]], dtype=torch.float64)
x1:	 tensor([[ 0.2647, -1.3317,  0.5373],
        [ 1.2838,  0.6807,  1.4483]], dtype=torch.float64)
x1.view(-1, 1):	 tensor([[ 0.2647],
        [-1.3317],
        [ 0.5373],
        [ 1.2838],
        [ 0.6807],
        [ 1.4483]], dtype=torch.float64)
x1.mean():	 tensor(0.4805, dtype=torch.float64)
x4:	 tensor(2.8830, dtype=torch.float64)
x4.item():	 2.8830280470194243
x:	 tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
x.requires_grad:	 True
x.grad:	 None
x.grad_fn:	 None
y:	 tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)
y.grad_fn:	 <AddBackward0 object at 0x000002A19877DF08>
z:	 tensor([[12., 12.],
        [12., 12.]], grad_fn=<AddBackward0>)
out:	 tensor(12., grad_fn=<MeanBackward0>)
x.grad:	 tensor([[1.5000, 1.5000],
        [1.5000, 1.5000]])
y.grad:	 None
x.data:	 tensor([[1., 1.],
        [1., 1.]])
False
y.requires_grad False
a.requires_grad:	 False
a.requires_grad_(True) tensor([2., 3.], dtype=torch.float16, requires_grad=True)