Pytorch基础(一) 初识tensor

本文主要包括Pytorch中张量的基本操作

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time  : 2021/1/30, 12:07
# @Author: Lee
# @File  : base_process.py

from __future__ import print_function
import torch

# Allocate a 5x3 matrix WITHOUT initializing it: the entries are whatever
# bytes happened to be in the underlying memory.
x1 = torch.empty((5, 3))
print(f"torch.empty(5, 3):\n {x1}")
"""
torch.empty(5, 3):
 tensor([[9.2755e-39, 1.0561e-38, 6.9796e-39],
        [9.2755e-39, 8.9082e-39, 8.9082e-39],
        [1.0194e-38, 9.1837e-39, 4.6837e-39],
        [9.2755e-39, 1.0837e-38, 8.4490e-39],
        [1.1112e-38, 1.0194e-38, 9.0919e-39]])

"""

# A 5x3 tensor of uniform random numbers drawn from [0, 1).
x2 = torch.rand((5, 3))
print(f"torch.rand(5, 3):\n {x2}")
"""
torch.rand(5, 3):
 tensor([[0.5743, 0.3368, 0.9248],
        [0.2669, 0.2339, 0.6719],
        [0.2343, 0.0802, 0.1925],
        [0.6387, 0.1446, 0.1647],
        [0.5917, 0.2945, 0.1726]])
"""

# A 5x3 matrix of zeros, explicitly typed as 64-bit integers (long).
x3 = torch.zeros((5, 3), dtype=torch.long)
print(f"torch.zeros(5, 3, dtype=torch.long):\n {x3}")
"""
torch.zeros(5, 3, dtype=torch.long):
 tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
"""

# Build a tensor directly from known Python data; the mixed int/float list
# is promoted to a single floating-point dtype.
x4 = torch.tensor([5.5, 3])
print(f"torch.tensor([5.5, 3]):\n {x4}")
"""
torch.tensor([5.5, 3]):
 tensor([5.5000, 3.0000])
"""

# A 5x3 matrix of ones stored in double precision.
x5_a = torch.ones((5, 3), dtype=torch.double)
# Same shape as x5_a, but filled with standard-normal samples and
# overridden to single precision via the dtype argument.
x5_b = torch.randn_like(x5_a, dtype=torch.float)
print(f"x5_a:\n {x5_a}")
print(f"x5_b:\n {x5_b}")
print(f"x5_a.size()= {x5_a.size()}")
"""
x5_a:
 tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
x5_b:
 tensor([[ 0.6318,  0.7818,  0.2896],
        [-1.5375, -1.0902, -2.2054],
        [ 0.9432,  0.0981, -0.6261],
        [-1.8649, -0.2275,  0.5104],
        [ 0.4113, -0.6067, -0.2163]])
x5_a.size()= torch.Size([5, 3])
"""

add_x1 = torch.tensor([[1, 2, 3], [4, 5, 6]])
add_x2 = torch.tensor([[4, 5, 6], [1, 2, 3]])
# Element-wise addition: the operator form and torch.add are equivalent.
print(f"add_x1 + add_x2 =  {add_x1 + add_x2}")
print(f"torch.add(add_x1, add_x2) =  {torch.add(add_x1, add_x2)}")
# torch.add can also write into a preallocated output tensor via out=.
result = torch.empty(2, 3)
torch.add(add_x1, add_x2, out=result)
print(f"result =  {result}")
# Methods ending in "_" mutate their receiver: this updates add_x2 in place.
print(f"add_x2.add_(add_x1) =  {add_x2.add_(add_x1)}")
"""
add_x1 + add_x2 =  tensor([[5, 7, 9], [5, 7, 9]])
torch.add(add_x1, add_x2) =  tensor([[5, 7, 9], [5, 7, 9]])
result =  tensor([[5., 7., 9.], [5., 7., 9.]])
add_x2.add_(add_x1) =  tensor([[5, 7, 9], [5, 7, 9]])
"""

# NumPy-style indexing works on tensors as well.
ix1 = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(f"ix1[:, 1] = {ix1[:, 1]}")  # column at index 1 (the second column; indices start at 0)
print(f"ix1[1, :] = {ix1[1, :]}")  # row at index 1 (the second row; indices start at 0)
"""
ix1[:, 1] = tensor([2, 5, 8])
ix1[1, :] = tensor([4, 5, 6])
"""

# To change a tensor's size or shape, use Tensor.view (reshapes without copying).
vx1 = torch.tensor([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]])
vx2 = vx1.view(12)     # flatten to a 12-element vector
vx3 = vx1.view(-1, 6)  # the size -1 is inferred from the other dimensions
print(f"vx1 = {vx1} vx1.size() = {vx1.size()}")
print(f"vx2 = {vx2} vx2.size() = {vx2.size()}")
print(f"vx3 = {vx3} vx3.size() = {vx3.size()}")
"""
vx1 = tensor([[ 1,  2,  3,  4],
        [ 4,  5,  6,  7],
        [ 7,  8,  9, 10]]) vx1.size() = torch.Size([3, 4])
vx2 = tensor([ 1,  2,  3,  4,  4,  5,  6,  7,  7,  8,  9, 10]) vx2.size() = torch.Size([12])
vx3 = tensor([[ 1,  2,  3,  4,  4,  5],
        [ 6,  7,  7,  8,  9, 10]]) vx3.size() = torch.Size([2, 6])
"""

# For a one-element tensor, .item() extracts the underlying Python scalar.
rx1 = torch.randn(1)
print(f"rx1 =  {rx1}")            # the tensor object itself
print(f"x.item() =  {rx1.item()}")  # its value as a plain Python float
"""
rx1 =  tensor([0.7057])
x.item() =  0.7057303786277771
"""

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值