# 04 Data manipulation / data preprocessing — basic tensor operations with PyTorch.
import torch

# A 1-D tensor holding 0..11; .shape reports its dimensions.
x = torch.arange(12)
print(x)
print(x.shape)

# Reinterpret the same 12 elements as a 3x4 matrix.
x = x.reshape(3, 4)
print(x)
print(x.shape)

# An all-zero tensor of shape (2, 3, 4).
y = torch.zeros(2, 3, 4)
print(y)
print(y.shape)

# Build a tensor from a nested Python list; the shape comes out as (1, 3, 4).
z = torch.tensor([[[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]])
print(z)
print(z.shape)

# Elementwise arithmetic; the 1.0 literal makes m a float tensor.
m = torch.tensor([1.0, 2, 4, 8])
n = torch.tensor([2, 2, 2, 2])
print(m + n)
print(m - n)
print(m * n)
print(m / n)
print(m ** n)  # elementwise exponentiation

# Concatenate two matrices along rows (dim 0) and along columns (dim 1).
X = torch.arange(12, dtype=torch.float32).reshape(3, 4)
Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
print(torch.cat((X, Y), dim=0))
print(torch.cat((X, Y), dim=1))

# Summing all elements of X yields a single-element tensor.
print(X.sum())

# Broadcasting: a (3, 1) and a (1, 2) tensor are both expanded to (3, 2) before adding.
a = torch.arange(3).reshape(3, 1)
b = torch.arange(2).reshape(1, 2)
print(a)
print(b)
print(a + b)

# Indexing and slicing: -1 selects the last row; 1:3 is the half-open row range [1, 3).
print(X[-1])
print(X[1:3])

# Writing through an index or a slice mutates X in place.
X[1, 2] = 9
print(X)
X[:2, :] = 12  # rows 0 and 1, every column, set to 12
print(X)

# Plain `Y = Y + X` allocates a fresh result tensor, so Y's identity changes.
before = id(Y)  # id() gives the object's memory identity before the operation
Y = Y + X
print(id(Y) == before)

# In-place update: assigning through a full slice reuses Z's existing storage.
# (Writing X += Y instead would work the same here, since X is not reused below.)
Z = torch.zeros_like(Y)
print('id(Z):', id(Z))
Z[:] = X + Y
print('id(Z):', id(Z))

# Conversion between torch tensors and numpy arrays.
A = X.numpy()
B = torch.tensor(A)
print(type(A))  # numpy.ndarray
print(type(B))  # torch.Tensor

# A size-1 tensor converts to a plain Python scalar.
A = torch.tensor([3.5])
print(A)
print(A.item())
print(float(A))
print(int(A))  # truncates toward zero
# Scraped page footer (not code): 04-数据操作+数据预处理 第一部分 数据操作 — 《动手学深度学习》
# 最新推荐文章于 2024-08-03 21:10:17 发布