PyTorch 基础操作

本文介绍了如何使用PyTorch进行随机数生成,包括均匀分布和高斯分布,还涵盖了数据类型的转换、张量操作如全零矩阵、随机排列、索引选择、矩阵运算、广播机制、拼接与拆分等技巧。此外,讲解了矩阵乘法、点乘、指数运算以及数据裁剪、统计分析和基本的数学函数应用。
摘要由CSDN通过智能技术生成

**

# PyTorch basics — study notes.
#
# The original notes kept every example as commented-out (dead) code, and
# several examples contained bugs: an empty ``print()``, ``pp.view(1000)``
# on a 12-element tensor (raises RuntimeError), a ``p.tranpose`` typo,
# ``torch.sqrt`` on an integer tensor, and a wrong shape claim for
# ``repeat``.  The examples are rewritten here as small importable demo
# functions.  Importing this module has no side effects; run it directly
# to print every demo.
import numpy as np
import torch


def random_creation_demos():
    """Return sample tensors from the common random/constant factories.

    Covers randint (uniform ints), randn (standard normal), rand
    (uniform over [0, 1)), normal (per-element mean/std), full
    (constant fill) and randperm (shuffled index vector).
    """
    return {
        # Integers drawn uniformly from [10, 100).
        "randint": torch.randint(10, 100, (3, 4)),
        # Standard normal: mean 0, variance 1.
        "randn": torch.randn(3, 4),
        # Uniform over [0, 1).
        "rand": torch.rand(3, 4),
        # Per-element mean/std: both arguments must be tensors of the
        # same length (4 elements each here).
        "normal": torch.normal(torch.arange(1, 11, 3.0),
                               torch.arange(1, 0, -0.3)),
        # Constant-filled matrix.
        "full": torch.full((2, 3), 3.141592),
        # Random permutation of 0..3 — the torch analogue of
        # random.shuffle applied to an index vector.
        "perm": torch.randperm(4),
    }


def numpy_interop_demos():
    """Show both directions of numpy <-> torch conversion.

    ``torch.tensor`` copies the data (cast to float32 here), while
    ``torch.from_numpy`` shares the underlying buffer and keeps the
    numpy dtype (float64 for ``np.random.randn``).
    """
    arr = np.random.randn(3, 4)
    copied = torch.tensor(arr).float()   # copy, cast to float32
    shared = torch.from_numpy(arr)       # zero-copy view, float64
    return copied, shared


def indexing_demos():
    """Demonstrate column shuffling, index_select, ``...`` and masked_select."""
    batch = torch.rand(1, 3, 28, 28)
    data = torch.randint(1, 10, (3, 4))
    mask = data.ge(5)                    # BoolTensor, True where >= 5
    return {
        # Reorder the columns of a matrix with a permuted index vector.
        "shuffled_cols": data[:, torch.randperm(4)],
        # Keep only indices 0 and 1 along the last dimension.
        "index_select": batch.index_select(3, torch.tensor([0, 1])),
        # '...' expands to ':' for every remaining dimension.
        "ellipsis": batch[0, 2, ...],
        # Flattens the selected (>= 5) entries into a 1-D tensor.
        "masked": torch.masked_select(data, mask),
    }


def reshape_demos():
    """view / transpose / unsqueeze / repeat round-trips."""
    p = torch.linspace(1, 12, 12).view(3, 4)
    # transpose swaps two dimensions but yields a non-contiguous tensor,
    # so a .contiguous() copy is required before .view().
    flat = p.transpose(1, 0).contiguous().view(1, -1)
    # Bug fix: the original called pp.view(1000) on a 12-element clone,
    # which raises — the element count must match exactly.
    pairs = p.clone().view(12).view(2, -1)
    b = torch.rand(1, 32, 1, 1)
    return {
        "flat": flat,                    # (1, 12)
        "unsqueezed": p.unsqueeze(1),    # (3, 1, 4)
        "pairs": pairs,                  # (2, 6)
        # repeat() tiles the data: (1*4, 32*2, 1, 1) -> (4, 64, 1, 1).
        # (The original comment claimed (4, 1024, 1, 1), which is wrong.)
        "repeated": b.repeat(4, 2, 1, 1),
    }


def broadcast_demo():
    """A size-1 tensor broadcasts against any compatible shape."""
    a = torch.randint(1, 12, (3, 4))
    s = torch.randint(1, 12, (1,))
    return a + s


def combine_split_demos():
    """cat/stack to join tensors; split/chunk to break them apart."""
    a = torch.rand(2, 3, 4)
    b = torch.rand(20, 3, 4)
    # cat only needs the non-concatenated dims to match: (22, 3, 4).
    cat = torch.cat((a, b), 0)
    # stack needs ALL dims to match and creates a new leading dim.
    stack = torch.stack((b, torch.rand(20, 3, 4)), 0)  # (2, 20, 3, 4)
    batch = torch.randn(16, 3, 28, 28)
    n_train = int(batch.shape[0] * 0.4)
    # split takes explicit chunk sizes along a dimension ...
    train, test = batch.split((n_train, batch.shape[0] - n_train), 0)
    # ... while chunk takes the number of (near-equal) chunks: 6, 6, 4.
    chunks = batch.chunk(3, 0)
    return cat, stack, (train, test), chunks


def linear_layer_demo(x=None):
    """Compute ``x @ W + b`` for a toy fully-connected layer.

    ``matmul`` / ``@`` broadcast over leading batch dims; ``torch.mm``
    is strictly 2-D and cannot handle (b, c, h, w)-shaped operands.
    """
    if x is None:
        x = torch.randn(4, 728)
    w = torch.randn(728, 256)
    b = torch.randn(256)  # broadcasts over the batch dimension
    return x @ w + b


def elementwise_math_demos():
    """sqrt/**2 and exp/log round-trips, plus clamp and elementwise *."""
    # Bug fix: cast to float before sqrt — integer tensors are not
    # supported by torch.sqrt on older releases.
    mat = torch.randint(1, 9, (3, 3)).float()
    roundtrip = torch.sqrt(mat) ** 2            # ~= mat (float round-off)
    logexp = torch.log(torch.exp(mat))          # ~= mat
    # clamp(min=10, max=14) squeezes every value into [10, 14].
    clamped = torch.clamp(torch.randint(1, 100, (10, 10)), 10, 14)
    hadamard = torch.randn(3, 4) * torch.randn(3, 4)  # elementwise product
    return roundtrip, logexp, clamped, hadamard


def reduction_demos():
    """max / topk / bool-to-int / where / gather along a dimension."""
    m = torch.randint(1, 10, (3, 4))
    values, indices = torch.max(m, dim=1)       # per-row max and argmax
    # topk returns the k largest (or smallest, largest=False) per row.
    top3 = torch.randint(1, 10, (8, 10)).topk(3, dim=1)
    as_int = (m > 5).int()                      # bool -> 0/1 ints
    small = torch.randint(1, 10, (3, 4))
    big = torch.randint(20, 100, (3, 4))
    # Pick from `small` where the condition holds, else from `big`.
    mixed = torch.where(small < 5, small, big)
    # gather re-reads `small` at the top-1 indices — the inverse of topk.
    top_idx = small.topk(1, dim=1)[1]
    gathered = torch.gather(small, dim=1, index=top_idx)
    return values, indices, top3, as_int, mixed, gathered


def main():
    """Print every demo (side effects only when run as a script)."""
    demos = (random_creation_demos, numpy_interop_demos, indexing_demos,
             reshape_demos, broadcast_demo, combine_split_demos,
             linear_layer_demo, elementwise_math_demos, reduction_demos)
    for fn in demos:
        print(f"== {fn.__name__} ==")
        print(fn())


if __name__ == "__main__":
    main()

**

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额 3.43 元,前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

CV_er

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值