(DL Notes) Dive into Deep Learning -- Data Operations

import torch
from torch import exp  # element-wise exponential that returns a tensor

'''
data operations
'''
#matrix-op
matrix=torch.tensor([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
print('original matrix')
print(matrix)
print('--------------------')
print('matrix[1,2]')
print(matrix[1,2])
print('--------------------')
print('matrix[1,:]')
print(matrix[1,:])
print('--------------------')
print('matrix[:,2]')
print(matrix[:,2])
print('--------------------')
print('matrix[1:3,2]')
print(matrix[1:3,2])
print('--------------------')
print('matrix[::2,::2]')
print(matrix[::2,::2])
print('--------------------')
print('matrix[-1]')
print(matrix[-1])
print('--------------------')


'''
OUTPUT
--------------------
original matrix
tensor([[ 1,  2,  3,  4],
        [ 5,  6,  7,  8],
        [ 9, 10, 11, 12],
        [13, 14, 15, 16]])
--------------------
matrix[1,2]
tensor(7)
--------------------
matrix[1,:]
tensor([5, 6, 7, 8])
--------------------
matrix[:,2]
tensor([ 3,  7, 11, 15])
--------------------
matrix[1:3,2]
tensor([ 7, 11])
--------------------
matrix[::2,::2]
tensor([[ 1,  3],
        [ 9, 11]])
--------------------
matrix[-1]
tensor([13, 14, 15, 16])
--------------------
'''
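
# the same slicing syntax also writes into a tensor (illustrative sketch,
# not part of the original run; clone() keeps `matrix` untouched)
m = matrix.clone()
m[0:2, :] = 0      # the scalar 0 is broadcast into the first two rows
print(m)           # rows 0 and 1 become all zeros, rows 2 and 3 are unchanged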


x=torch.arange(10)
# x -- integers in [0, 10)
print(x)

print('shape -----' + str(x.shape))
print('#elements -----' + str(x.numel()))

x=x.reshape(2,5)
print(x)

'''
OUTPUT
--------------------
tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
shape -----torch.Size([10])
#elements -----10
tensor([[0, 1, 2, 3, 4],
        [5, 6, 7, 8, 9]])
'''
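
# reshape can infer one dimension when given -1 (illustrative sketch):
# both calls below reproduce the 2x5 tensor from above
print(x.reshape(-1, 5).shape)   # torch.Size([2, 5])
print(x.reshape(2, -1).shape)   # torch.Size([2, 5])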

i=torch.tensor([1,2,3,4])
j=torch.tensor([4.0,5,6,7]) #float

print('j')
print(j)
print('i+j')
print(i+j)
print('i*j')
print(i*j)
print('exp(j)')
print(exp(j))

'''
OUTPUT
--------------------
j
tensor([4., 5., 6., 7.])
i+j
tensor([ 5.,  7.,  9., 11.])
i*j
tensor([ 4., 10., 18., 28.])
exp(j)
tensor([  54.5981,  148.4132,  403.4288, 1096.6332])
'''
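# the remaining arithmetic operators are elementwise as well (illustrative sketch):
print(i - j)    # tensor([-3., -3., -3., -3.])
print(i / j)    # tensor([0.2500, 0.4000, 0.5000, 0.5714])
print(j ** 2)   # tensor([16., 25., 36., 49.])
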
#concatenation
X = torch.arange(12, dtype=torch.float32).reshape((3,4))
Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) #float
print('------------ \n'+'x' )
print(X)
print(X.shape )
print('------------ \n'+'y' )
print(Y)
print(Y.shape )
print('------------ \n'+'cat(x,y)  vertically' )
print(torch.cat((X, Y), dim=0))
print(torch.cat((X, Y), dim=0).shape )
print('------------ \n'+'cat(x,y)  horizontally' )
print(torch.cat((X, Y), dim=1))
print(torch.cat((X, Y), dim=1).shape )

'''
OUTPUT
--------------------
x
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  6.,  7.],
        [ 8.,  9., 10., 11.]])
torch.Size([3, 4])
------------ 
y
tensor([[2., 1., 4., 3.],
        [1., 2., 3., 4.],
        [4., 3., 2., 1.]])
torch.Size([3, 4])
------------ 
cat(x,y)  vertically
tensor([[ 0.,  1.,  2.,  3.],
        [ 4.,  5.,  6.,  7.],
        [ 8.,  9., 10., 11.],
        [ 2.,  1.,  4.,  3.],
        [ 1.,  2.,  3.,  4.],
        [ 4.,  3.,  2.,  1.]])
torch.Size([6, 4])
------------ 
cat(x,y)  horizontally
tensor([[ 0.,  1.,  2.,  3.,  2.,  1.,  4.,  3.],
        [ 4.,  5.,  6.,  7.,  1.,  2.,  3.,  4.],
        [ 8.,  9., 10., 11.,  4.,  3.,  2.,  1.]])
torch.Size([3, 8])
'''
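
# contrast with torch.stack (illustrative sketch): stack joins along a NEW axis
# instead of an existing one, so two (3,4) tensors become one (2,3,4) tensor
print(torch.stack((X, Y), dim=0).shape)   # torch.Size([2, 3, 4])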

#memory management -- avoid reallocating memory where possible: update in place with += or write into an existing tensor via slice assignment

X = torch.arange(12, dtype=torch.int).reshape((3,4))
Y = torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
print(id(Y))
Y=Y+X
print(id(Y))
Y+=X
print(id(Y))
print('--------------------')
Z = torch.zeros_like(Y)
print(id(Z))
Z[:]=Y+X
print(id(Z))

'''
OUTPUT
--------------------
139964398391984
139964398392944
139964398392944
--------------------
139964398391984
139964398391984
'''
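
# another way to reuse Z's buffer (illustrative sketch): the out= argument of torch.add
before = id(Z)
torch.add(X, Y, out=Z)     # result is written into the existing tensor Z
print(id(Z) == before)     # True -- no new memory was allocated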

#broadcasting -- tensors with different shapes are conceptually copied out to a common shape first, then the elementwise operation is applied
a = torch.arange(3).reshape((3, 1))
b = torch.arange(2).reshape((1, 2))
print(a)
print(b)
print(a+b)

'''
OUTPUT
--------------
tensor([[0],
        [1],
        [2]])
tensor([[0, 1]])
tensor([[0, 1],
        [1, 2],
        [2, 3]])
'''
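
# the implicit copying can be made explicit with torch.broadcast_to
# (illustrative sketch; requires torch >= 1.8)
print(torch.broadcast_to(a, (3, 2)))   # a's single column repeated twice
print(torch.broadcast_to(b, (3, 2)))   # b's single row repeated three times
# summing these two (3,2) tensors gives exactly a+b from above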
