Getting started with torch

Creating tensors

import torch
import numpy as np

#create a tensor with values drawn from a standard normal distribution
x=torch.randn((5,3),dtype=torch.float16)
#shape of the tensor
x.shape

#create an uninitialized tensor (its values are whatever happens to be in memory)
x=torch.empty((2,3),dtype=torch.float32)

#all-zeros tensor
x=torch.zeros((2,3),dtype=torch.long)

#all-ones tensor
x=torch.ones(2,3)

#identity-like tensor: ones on the diagonal
x=torch.eye(3,4)

#create from a list, and convert back to a list
x=torch.tensor([[2,3,4],[2,3,6]],dtype=torch.float16)
x.tolist()

#create from a numpy array, and convert back to an array
a=np.random.random((2,2))
x=torch.from_numpy(a)
x.numpy()

'''
Difference between from_numpy and torch.tensor:
from_numpy: shares memory with the array, so if the array changes, the tensor created from it changes too
torch.tensor: copies the data, so changes to the array do not affect the tensor created from it
'''
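A quick check of this difference (a minimal sketch):

#from_numpy shares memory with the array, torch.tensor copies it
a = np.zeros(3)
t_shared = torch.from_numpy(a)   #changes along with a
t_copied = torch.tensor(a)       #independent copy
a[0] = 5
t_shared   #tensor([5., 0., 0.], dtype=torch.float64)
t_copied   #tensor([0., 0., 0.], dtype=torch.float64)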

#change the shape; reshape is more flexible: view needs contiguous memory, reshape copies when necessary
x.reshape(1,-1)
x.view(1,-1)
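A small illustration of where the two differ (using a transposed tensor, which is non-contiguous):

t = torch.randn(2, 3).t()   #transposing makes the tensor non-contiguous
t.reshape(1, -1)            #works: reshape copies when needed
#t.view(1, -1)              #would raise a RuntimeError on a non-contiguous tensor
t.contiguous().view(1, -1)  #works once the memory is contiguous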

Common operations

x=torch.tensor([[2,3,4],[2,3,6]])
y=torch.tensor([[1,2,1],[2,6,0]])

x+y

x-y

x / y

x*y

#element-wise maximum of two tensors (here a scalar tensor is broadcast against x)
torch.maximum(torch.tensor(3),x)

#element-wise square
torch.pow(x,2)

#maximum along a given axis
torch.max(x,1)
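Note that torch.max with a dim argument returns both the values and their indices; for the x above:

values, indices = torch.max(x, 1)
values    #tensor([4, 6])
indices   #tensor([2, 2])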

Gradient computation and gradient descent

x=np.linspace(0,100,10000)
noise=np.random.uniform(size=(10000,))

#ground-truth parameters: w=10, b=10
y=10*x+10+noise

x = torch.from_numpy(x)
y = torch.from_numpy(y)

w=torch.randn(1,requires_grad=True)
b=torch.randn(1,requires_grad=True)

#fit the regression with gradient descent
for epoch in range(500000000):
    #compute predictions
    y_ = x * w + b
    #compute the loss
    loss = torch.mean((y_ - y)**2)

    if epoch==0:
        #backpropagation
        loss.backward()
    else:
        #zero the gradients
        w.grad.zero_()
        b.grad.zero_()
        #backpropagation
        loss.backward()
    #gradient update; the step size needs care, otherwise training diverges or converges too slowly
    w.data = w.data - 2e-4 * w.grad.data
    b.data = b.data - 2e-4 * b.grad.data
    
    if loss<0.1:
        break
    #print(w,b)
    #w:10.0038;b:10.2498
    
    #print('epoch: {}, loss: {}'.format(epoch, loss.data))
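The .data updates above work, but the same step is more commonly written inside torch.no_grad(); a minimal sketch of one update:

#one update step without touching .data
with torch.no_grad():
    w -= 2e-4 * w.grad
    b -= 2e-4 * b.grad
w.grad.zero_()
b.grad.zero_()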

Implementing a fully connected layer with matrix multiplication

x=torch.randn((4,5))
w_true=torch.randint(1,10,size=(5,1),dtype=torch.float32)
b_true=torch.tensor(20.0)
noise=torch.randn(size=(4,1))
#matrix multiplication
y=x@w_true+b_true+noise

w=torch.zeros(size=(5,1),requires_grad=True,dtype=torch.float32)
b=torch.zeros(1,requires_grad=True)

#training
for epoch in range(10000000):
    y_=x@w+b
    loss=torch.mean((y-y_)**2)
    
    if epoch==0:
        loss.backward()
    else:
        w.grad.zero_()
        b.grad.zero_()
        
        loss.backward()
        
    w.data=w.data - 2e-4 * w.grad.data
    b.data=b.data - 2e-4 *b.grad.data
    
    if loss<0.1:
        break
'''
#weights
w:[[ 0.5081],
        [ 5.0037],
        [ 0.8767],
        [ 4.9839],
        [13.5279]]
#bias
b:[14.1485]
#loss
loss:0.1000
'''

Using the nn.Linear layer

from torch import nn
from torch import optim

#build the network
net=nn.Linear(5,1,bias=True)
#build the optimizer
optimizer=optim.Adam(net.parameters(),lr=2e-4)

for epoch in range(10000000):
    y_=net(x)
    loss=torch.mean((y-y_)**2)
    
   
    #zero the gradients
    optimizer.zero_grad()
    #compute gradients via backpropagation
    loss.backward()
    #update the parameters
    optimizer.step()
    
    if loss<0.1:
        break

#weights
#[ 0.6655,  4.8166, -3.5347,  7.4862, 13.4877]
net.weight.data


#bias
#[13.6001]
net.bias.data

#loss
0.0999

Activation functions

#ELU
def ELU_self(x, a=1.0):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    return torch.maximum(x_0, x) + torch.minimum(x_0, a * (torch.exp(x) - 1))


#LeakyReLU
def LeakyReLU_self(x, a=1e-2):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    return torch.maximum(x_0, x) + a * torch.minimum(x_0, x)


#ReLU
def ReLU_self(x):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    return torch.maximum(x_0,x)


#ReLU6
def ReLU6_self(x):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    x_6=torch.tensor(6)
    return torch.minimum(torch.maximum(x_0, x), x_6)


#SELU
def SELU_self(x,
              scale=1.0507009873554804934193349852946,
              a=1.6732632423543772848170429916717):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return scale * (torch.maximum(x_0, x) +
                    torch.minimum(x_0, a * (torch.exp(x) - 1)))


#CELU
def CELU_self(x, a=1.0):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return torch.maximum(x_0, x) + torch.minimum(x_0,
                                                 a * (torch.exp(x / a) - 1.0))


#Sigmoid
def Sigmoid_self(x):
    x = torch.tensor(x)
    return 1.0 / (1 + torch.exp(-x))


#LogSigmoid
def LogSigmoid_self(x):
    x = torch.tensor(x)
    return torch.log(1.0 / (1 + torch.exp(-x)))


#Tanh
def Tanh_self(x):
    x = torch.tensor(x)
    return 1 - 2.0 / (torch.exp(2 * x) + 1)


#Tanhshrink
def Tanhshrink_self(x):
    x = torch.tensor(x)
    return x + 2.0 / (torch.exp(2 * x) + 1) - 1


#Softplus
def Softplus_self(x, b=1.0):
    x = torch.tensor(x)
    return 1 / b * torch.log(1 + torch.exp(x * b))


#Softshrink: shrinks values toward zero by lambd and zeroes everything within [-lambd, lambd]
def Softshrink_self(x,lambd=0.5):
    x_=torch.tensor(x)
    
    x_=torch.where(x_>lambd,x_-lambd,x_)
    x_=torch.where(x_<-lambd,x_+lambd,x_)
    x_[x==x_]=0
    
    return x_
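These hand-written versions can be sanity-checked against the built-ins in torch.nn.functional (a quick check on a small random input):

import torch.nn.functional as F

t = torch.randn(5)
torch.allclose(ReLU_self(t), F.relu(t))            #True
torch.allclose(ELU_self(t), F.elu(t))              #True
torch.allclose(Sigmoid_self(t), torch.sigmoid(t))  #True
torch.allclose(Tanh_self(t), torch.tanh(t))        #True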

Convolution layers: principle and usage

import matplotlib.pyplot as plt
#used to load the image
from PIL import Image
import torch.nn as nn
from torchvision import transforms
from torchkeras import summary

image=Image.open('tu.jpg')

# convert the image data into a tensor
img_transform = transforms.Compose([transforms.ToTensor()])
img_tensor = img_transform(image)

#convolution expects a 4-D input tensor (batch, channels, height, width)
#methods ending in '_' modify the tensor in place
img_tensor.unsqueeze_(dim=0)

#both results are used in the plotting section below, so compute the regular and the transposed convolution
#input channels, number of kernels, kernel size, stride, padding
conv_layer = nn.Conv2d(in_channels=3,out_channels=1,kernel_size=5,stride=1,padding=2)
# initialize the convolution weights
nn.init.xavier_normal_(conv_layer.weight.data)
# nn.init.xavier_uniform_(conv_layer.weight.data)
img_conv = conv_layer(img_tensor)

#transposed convolution
conv_layer_ts = nn.ConvTranspose2d(in_channels=3,out_channels=1,kernel_size=5,stride=1,padding=2)
nn.init.xavier_normal_(conv_layer_ts.weight.data)
img_conv_ts = conv_layer_ts(img_tensor)

Computing the number of parameters

params = number of kernels * kernel size * number of input channels + number of kernels (for the biases)

76 = 1*5*5*3+1

#parameter count for 32 kernels of size 5x5 on a single input channel
32*5*5*1+32
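The formula can be checked by counting a layer's actual parameters (a small check using the same 3-channel, 5x5 convolution as above):

conv = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=5)
sum(p.numel() for p in conv.parameters())   #76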

Convolution output size

#(input size - kernel size + 2*padding) / stride + 1

#500=(500-5+2*2)/1+1

img_conv.shape

torch.Size([1, 1, 500, 500])

Plotting the results

img_tensor.squeeze_(dim=0)
img_conv.squeeze_(dim=0)
img_conv_ts.squeeze_(dim=0)

plt.subplot(131).imshow(np.transpose(img_tensor.data.numpy(),[1,2,0]))
plt.axis('off')
plt.subplot(132).imshow(np.transpose(img_conv.data.numpy(),[1,2,0]))
plt.axis('off')
plt.subplot(133).imshow(np.transpose(img_conv_ts.data.numpy(),[1,2,0]))
plt.tight_layout()
plt.axis('off')
plt.show()

Loss functions

#the standard usage pattern (Losscriterion stands for any loss class, e.g. nn.MSELoss)
criterion=Losscriterion()
loss=criterion(y_,y)

Using common losses

#BCELoss, binary classification loss
#the prediction comes first, the target second
criterion=nn.BCELoss()

m=nn.Sigmoid()
x=torch.randn(3,requires_grad=True)
y_=m(x)
y=torch.randint(0,2,size=(3,),dtype=torch.float)
loss=criterion(y_,y)
loss.backward()
loss
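In practice the Sigmoid + BCELoss pair is often replaced by nn.BCEWithLogitsLoss, which applies the sigmoid internally and is more numerically stable; a minimal sketch:

criterion=nn.BCEWithLogitsLoss()
x=torch.randn(3,requires_grad=True)
y=torch.randint(0,2,size=(3,),dtype=torch.float)
loss=criterion(x,y)   #takes the raw scores directly
loss.backward()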


# NLLLoss, multi-class loss; it expects log-probabilities, so pair it with LogSoftmax rather than Softmax
criterion=nn.NLLLoss()
m=nn.LogSoftmax(dim=1)
x=torch.randn((3,4),requires_grad=True)
y_=m(x)
y=torch.randint(0,4,size=(3,))
loss=criterion(y_,y)
loss.backward()
loss
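Likewise, nn.CrossEntropyLoss combines LogSoftmax and NLLLoss and is applied to the raw scores:

criterion=nn.CrossEntropyLoss()
x=torch.randn((3,4),requires_grad=True)
y=torch.randint(0,4,size=(3,))
loss=criterion(x,y)
loss.backward()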

#L1Loss, MAE (mean absolute error)
criterion=nn.L1Loss()
y_=torch.randn((1,5),requires_grad=True)
y=torch.randn((1,5))
loss=criterion(y_,y)
loss.backward()
loss

#MSELoss, mean squared error
criterion=nn.MSELoss()
y_=torch.randn((1,5),requires_grad=True)
y=torch.randn((1,5))
loss=criterion(y_,y)
loss.backward()
loss

Using optimizers

from torch import optim

#the general workflow (Optim stands for any optimizer class, e.g. optim.SGD)
#define the optimizer
optimizer=Optim()
#zero the gradients
optimizer.zero_grad()
#update the parameters
optimizer.step()
x = torch.randn((4,5),requires_grad=False)
w_true = torch.randint(1, 10, size=(5, 1), dtype=torch.float)
b_true = torch.tensor(20.0)
noise = torch.randn(size=(4, 1))
y = x @ w_true + b_true + noise


result = {}
for lr in [0.01, 0.1, 0.5]:
    #re-create the network for each learning rate
    net = nn.Linear(5, 1, bias=True)
    #define the optimizer
    optimizer = optim.SGD(net.parameters(), lr=lr)
    #define the loss
    mseloss = nn.MSELoss()

    for epoch in range(10000000):
        #zero the gradients
        optimizer.zero_grad()
        #compute the loss
        loss = mseloss(net(x), y)
        #backpropagation
        loss.backward()
        #update the parameters
        optimizer.step()

        if loss.item() < 0.1 or epoch >= 10000:
            result[lr] = {'loss': loss.item(), 'epoch': epoch}
            break
#results
#when lr is too large, training diverges and never converges
result
{0.01: {'loss': 0.09930270910263062, 'epoch': 766},
 0.1: {'loss': 0.0925668329000473, 'epoch': 76},
 0.5: {'loss': nan, 'epoch': 10000}}

Pooling layers


x=torch.randn(10,3,128,128)

#MaxPool2d
maxp=nn.MaxPool2d(5,3)
#42 = (128-5+2*0)/3+1, rounded down
maxp(x).shape

torch.Size([10, 3, 42, 42])

maxp(x)[0,0,0,4]
tensor(1.9936)

#AvgPool2d, takes the mean of each window
avgp=nn.AvgPool2d(5,3)
#42 = (128-5+2*0)/3+1, rounded down
avgp(x).shape

torch.Size([10, 3, 42, 42])

avgp(x)[0,0,0,4]
tensor(-0.1445)
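Both values can be checked against the corresponding input window (a small sanity check; output column 4 starts at input column 4*3=12 because the stride is 3):

window = x[0, 0, 0:5, 12:17]
torch.allclose(maxp(x)[0, 0, 0, 4], window.max())    #True
torch.allclose(avgp(x)[0, 0, 0, 4], window.mean())   #True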

Normalization layers

  1. A BN layer speeds up training and helps the model converge better
  2. A BN layer only has a clear effect when the batch size is large enough

x=torch.randint(0,256,size=(10,3,128,128)).float()

#BN
bn=nn.BatchNorm2d(3)
bn(x)[0,0,0,2]

tensor(1.1019, grad_fn=<SelectBackward>)

#GN
#num_channels must be divisible by num_groups
gn=nn.GroupNorm(num_groups=3,num_channels=3)
gn(x)[0,0,0,2]

tensor(1.0831, grad_fn=<SelectBackward>)
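What BatchNorm2d computes per channel can be reproduced by hand (a sketch; in training mode it uses the biased batch variance, and the affine weight/bias start at 1 and 0):

#normalize one element of channel 0 by hand and compare with the layer's output
mean = x[:, 0].mean()
var = x[:, 0].var(unbiased=False)
manual = (x[0, 0, 0, 2] - mean) / torch.sqrt(var + bn.eps)
torch.allclose(manual, bn(x)[0, 0, 0, 2])   #True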