pytorch 学习笔记(二)

# Bridge between NumPy and PyTorch: from_numpy() wraps the ndarray's
# data in a tensor, preserving its dtype.
import torch
import numpy as np

a = np.array([2, 3, 3])
print(a)

b = torch.from_numpy(a)
print(b)
*out:[2 3 3]
tensor([2, 3, 3], dtype=torch.int32)*

# A float64 ndarray converts to a DoubleTensor — the dtype carries over.
a = np.ones([2, 3])
b = torch.from_numpy(a)
print(b)
*out:tensor([[1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)*


# torch.empty() allocates WITHOUT initialising — the values are whatever
# was in memory (possibly nan/inf), so never read them before writing.
a = torch.empty(3)
print(a)
*out:tensor([ 0.0000,  0.0000, -0.0002])*

# Set the default floating-point dtype.
# torch.Tensor == torch.FloatTensor by default; DoubleTensor is 64-bit
# (not 64-byte) and higher precision.
# NOTE: torch.set_default_tensor_type() is deprecated in modern PyTorch;
# torch.set_default_dtype() is the supported replacement and gives the
# same result here (new float tensors become DoubleTensor on CPU).
torch.set_default_dtype(torch.float64)
print(torch.tensor([1.2,3]).type())
*out:torch.DoubleTensor*


# torch.rand() samples uniformly from [0, 1).
a = torch.rand(2, 2)
print(a)
*out:tensor([[0.7875, 0.2231],
        [0.8321, 0.0684]])*

# rand_like(t) reads t.shape and draws a fresh uniform tensor of that shape.
print(torch.rand_like(a))
*out:tensor([[0.4152, 0.7236],
        [0.9635, 0.0884]])*

# randint(low, high, shape) draws integers in [low, high) — high excluded.
a = torch.randint(1, 10, [3, 3])
print(a)
*out:tensor([[6, 8, 8],
        [3, 4, 4],
        [3, 6, 4]])*

# randn() samples from the standard normal distribution N(0, 1).
a = torch.randn(3, 3)
print(a)
*out:tensor([[-0.5456, -2.0239,  0.4534],
        [ 1.1598, -0.2270,  0.8044],
        [-1.0634,  0.3796,  1.0218]])*

# full(shape, value) fills a new tensor with a single value.
# Use a float fill value: since PyTorch 1.5 the dtype is inferred from
# the fill value, so an integer 9 would yield an integer tensor instead
# of the floating-point output shown in the annotated result below.
a = torch.full([2, 3], 9.)
print(a)
*out:tensor([[9., 9., 9.],
        [9., 9., 9.]])*

# arange(start, end, step) — end is excluded; arange(n) == arange(0, n).
a = torch.arange(10)
print(a)
a = torch.arange(0, 10)
print(a)
b = torch.arange(0, 10, 2)
print(b)
b = torch.arange(0, 10, 4)
print(b)
b = torch.arange(0, 10, 3)
print(b)
*out:tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
tensor([0, 2, 4, 6, 8])
tensor([0, 4, 8])
tensor([0, 3, 6, 9])*

# linspace(start, end, steps) returns `steps` evenly spaced values and
# INCLUDES both endpoints (unlike arange).
a = torch.linspace(0, 10, steps=10)
print(a)
a = torch.linspace(0, 10, steps=11)
print(a)
# logspace(start, end, steps) returns 10**x for x linearly spaced between
# start and end: 10**0 = 1 ... 10**(-1) = 0.1.
a = torch.logspace(0, -1, steps=10)
print(a)
*out:tensor([ 0.0000,  1.1111,  2.2222,  3.3333,  4.4444,  5.5556,  6.6667,  7.7778,
         8.8889, 10.0000])
tensor([ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10.])
tensor([1.0000, 0.7743, 0.5995, 0.4642, 0.3594, 0.2783, 0.2154, 0.1668, 0.1292,
        0.1000])*

# ones / zeros / eye (identity matrix); each also has a *_like variant
# that copies the shape of an existing tensor.
a = torch.ones(3, 3)
print(a)
a = torch.zeros(3, 3)
print(a)
# eye() accepts at most two dimension arguments.
a = torch.eye(3, 3)
print(a)
*out:tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
tensor([[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]])
tensor([[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]])*

# randperm(n) returns a random permutation of the integers 0..n-1
# (n excluded) — the counterpart of numpy's random.shuffle on indices.
a = torch.rand(2, 3)
b = torch.rand(2, 3)
print(a)
print(b)
# Index a and b with the SAME permutation so their rows stay paired.
idx = torch.randperm(2)
print(idx)
print(a[idx])
print(b[idx])
# Fancy indexing returns copies; a and b themselves are unchanged.
print(a, b)
*out:tensor([[0.6153, 0.7362, 0.5055],
        [0.3819, 0.7944, 0.7903]])
tensor([[0.2867, 0.7898, 0.1244],
        [0.1755, 0.6209, 0.2582]])
tensor([1, 0])
tensor([[0.3819, 0.7944, 0.7903],
        [0.6153, 0.7362, 0.5055]])
tensor([[0.1755, 0.6209, 0.2582],
        [0.2867, 0.7898, 0.1244]])
tensor([[0.6153, 0.7362, 0.5055],
        [0.3819, 0.7944, 0.7903]]) tensor([[0.2867, 0.7898, 0.1244],
        [0.1755, 0.6209, 0.2582]])*
# Indexing: for a tensor a of shape [b, c, m, n], a[i] has shape [c, m, n].

# Slicing selects the first / last N entries along a dimension;
# a negative start counts from the end.
a = torch.rand(3, 3, 28, 28)
b = a[:2, 1:, :, :].shape
print(b)
b = a[:2, -1:, :, :].shape
print(b)
*out:torch.Size([2, 2, 28, 28])
torch.Size([2, 1, 28, 28])*

# Slicing with a stride: start:end:step along each dimension.
b = a[:2, -1:, 0:28:2, 0:28:4].shape
print(b)
*out:torch.Size([2, 1, 14, 7])*

# index_select(dim, indices) keeps only the given indices along one dimension.
b = a.index_select(0, torch.tensor([0, 2])).shape
print(b)
b = a.index_select(2, torch.arange(8)).shape
print(b)
*out:torch.Size([2, 3, 28, 28])
torch.Size([3, 3, 8, 28])*

# "..." (Ellipsis) expands to as many full slices (:) as needed.
b = a[0, ...].shape
print(b)
b = a[0, ..., :3].shape
print(b)
*out:torch.Size([3, 28, 28])
torch.Size([3, 28, 3])*

# Boolean-mask selection.
x = torch.randn(3, 4)
print(x)
mask = x.ge(0.5)  # ge: greater than or equal (>=)
print(mask)
# masked_select always returns a flattened 1-D tensor of the kept values.
a = torch.masked_select(x, mask)
print(a)
print(a.shape)
*out:tensor([[ 0.6893,  0.0711, -1.0243,  1.0777],
        [ 0.1276,  1.0938,  0.7971,  0.5204],
        [-1.0290, -0.3457,  0.7780,  0.5476]])
tensor([[1, 0, 0, 1],
        [0, 1, 1, 1],
        [0, 0, 1, 1]], dtype=torch.uint8)
tensor([0.6893, 1.0777, 1.0938, 0.7971, 0.5204, 0.7780, 0.5476])
torch.Size([7])*

# torch.take(src, indices) indexes src as if it were flattened to 1-D.
src = torch.tensor([[4, 3, 5], [6, 7, 8]])
a = torch.take(src, torch.tensor([0, 2, 5]))
print(src)
print(a)
*out:tensor([[4, 3, 5],
        [6, 7, 8]])
tensor([4, 5, 8])*

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值