PyTorch beginner-level functions and how to use them

tensorboard

from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np

writer = SummaryWriter("logs")

img = Image.open("test.jpg")
# the second argument, img_tensor (torch.Tensor, numpy.array, or string/blobname), is the image data
img_array = np.array(img)  # the input type is restricted, so convert the PIL image to an ndarray
print(type(img_array))
print(img_array.shape)
writer.add_image("test",img_array,2,dataformats="HWC")   # arguments: tag, image data, global step, data format (the default is CHW, so it must be overridden here)

for i in range(100):
    writer.add_scalar("y=3x",3*i,i)  # arguments: tag, the y-axis value, the x-axis step

# Opening the TensorBoard event files:
# run tensorboard --logdir=logs on the command line; Ctrl+C quits
# to change the port: tensorboard --logdir=logs --port=6007
# When creating several runs without changing the tag or log directory, the new events are written on top of the old ones and the curves can look strange. Either delete the old event files before starting a new run, or write each run into its own empty subdirectory (see the sketch after this block).

writer.close()
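One way to give every run its own empty subdirectory, as suggested above, is to put a timestamp in the log directory name. A minimal sketch, assuming only the standard-library time module on top of the code above:

import time
from torch.utils.tensorboard import SummaryWriter

run_dir = "logs/" + time.strftime("%Y%m%d-%H%M%S")  # e.g. logs/20240101-120000, a fresh folder per run
writer = SummaryWriter(run_dir)
for i in range(100):
    writer.add_scalar("y=3x", 3 * i, i)
writer.close()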

transform

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
# converting the image to a tensor makes it easier to feed into a neural network for training
img = Image.open("test.jpg")
# using transforms
# transforms.ToTensor is a template ("tool blueprint") provided by the library; trans below is the concrete tool we build from it
trans = transforms.ToTensor()
# feeding the image into our own trans tool gives us the tensor data we want
img_tensor = trans(img)
writer = SummaryWriter("logs2")
# img_tensor can be passed in directly here, with no numpy conversion, because it is already tensor data
writer.add_image("tensor_img",img_tensor)

#Normalize
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5]) # each output value is (original value - first parameter) / second parameter, i.e. (x - mean) / std per channel
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])
writer.add_image("Normalize",img_norm)

#Compose() takes a list of transforms and chains them together
# writer.add_image above expects tensor data, so the resized image still needs ToTensor applied; Compose lets us combine the two steps into a single call
trans_resize2 = transforms.Resize(512)
trans_compose = transforms.Compose([trans_resize2,trans])
img_resize2 = trans_compose(img)
writer.add_image("Resize",img_resize2,1)

#RandomCrop(object): Crop the given PIL Image at a random location.
trans_random = transforms.RandomCrop(512)
trans_compose_2 = transforms.Compose([trans_random,trans])
for i in range(10):
    img_crop = trans_compose_2(img)
    writer.add_image("RandomCrop",img_crop,i)


writer.close()

These are only a few examples. The general takeaway when calling such functions: pay attention to the inputs and outputs, read the official documentation and check which parameters a method needs; when the return value is unclear, use print(type()) or step through with the debugger.
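A minimal sketch of that kind of inspection, reusing the img and img_tensor objects created in the transforms example above:

print(type(img))         # <class 'PIL.JpegImagePlugin.JpegImageFile'> for a JPEG opened with PIL
print(type(img_tensor))  # <class 'torch.Tensor'>
print(img_tensor.shape)  # torch.Size([C, H, W]); ToTensor also scales pixel values into [0, 1]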

torch.nn.functional

import torch.nn.functional as F
import torch

input = torch.tensor([[1,2,3,4],
                      [2,4,6,8],
                      [3,5,7,9],
                      [5,4,6,3]])
kernel = torch.tensor([[1,0,1],
                       [2,0,2],
                       [1,1,1]])
# conv2d expects 4-D tensors of shape (batch, channels, height, width)
input = torch.reshape(input,(1,1,4,4))
kernel = torch.reshape(kernel,(1,1,3,3))
output = F.conv2d(input,kernel,stride=1,padding=1)
print(output)
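The output spatial size follows the standard convolution formula out = floor((in + 2*padding - kernel_size) / stride) + 1; a quick check against the tensors above:

# height and width: (4 + 2*1 - 3) // 1 + 1 = 4
print(output.shape)  # torch.Size([1, 1, 4, 4])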

convolution

from torch import nn
import torch
import torchvision
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
dataloader = DataLoader(dataset,batch_size=64)

class Conv(nn.Module):
    def __init__(self):
        super(Conv,self).__init__()
        self.conv1 = Conv2d(in_channels=3,out_channels=6,kernel_size=3,stride=1,padding=0)

    def forward(self,x):
        x = self.conv1(x)
        return x

myConv = Conv()
writer = SummaryWriter("logs")

step = 0
for data in dataloader:
    imgs, targets = data
    output = myConv(imgs)
    # the conv output has 6 channels, which add_images cannot display directly,
    # so fold the extra channels back into additional 3-channel images (the batch dimension grows instead)
    output = torch.reshape(output,(-1,3,30,30))
    writer.add_images("input",imgs,step)
    writer.add_images("output",output,step)
    step = step+1

writer.close()
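Why 30 and why the reshape: with 32x32 CIFAR10 inputs, kernel_size=3, stride=1 and padding=0 give an output side length of (32 - 3) / 1 + 1 = 30, and the 6 output channels are folded back into 3-channel images purely so TensorBoard can render them. A quick check under those assumptions:

sample = torch.ones((64, 3, 32, 32))                          # one CIFAR10-sized batch
print(myConv(sample).shape)                                   # torch.Size([64, 6, 30, 30])
print(torch.reshape(myConv(sample), (-1, 3, 30, 30)).shape)   # torch.Size([128, 3, 30, 30])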

maxpooling

from torch import nn
from torch.nn import MaxPool2d
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
dataloader = DataLoader(dataset,batch_size=64)

class maxpool(nn.Module):
    def __init__(self):
        super(maxpool,self).__init__()
        # ceil_mode=True keeps the partially covered windows at the border instead of dropping them
        self.maxpool1 = MaxPool2d(kernel_size=3,ceil_mode=True)

    def forward(self,input):
        output = self.maxpool1(input)
        return output

writer = SummaryWriter("logs_maxpool")
myMaxpool = maxpool()
step = 0

for data in dataloader:
    imgs, targets = data
    writer.add_images("input_maxpool",imgs,step)
    output = myMaxpool(imgs)
    writer.add_images("output_maxpool",output,step)
    step = step+1

writer.close()
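To see what ceil_mode changes, here is a minimal sketch on a hand-made 5x5 tensor (the values are arbitrary and only for illustration): with kernel_size=3 the default stride is also 3, so floor mode only fits one full window while ceil mode keeps the partial ones as well.

import torch
from torch.nn import MaxPool2d

x = torch.tensor([[1, 2, 0, 3, 1],
                  [0, 1, 2, 3, 1],
                  [1, 2, 1, 0, 0],
                  [5, 2, 3, 1, 1],
                  [2, 1, 0, 1, 1]], dtype=torch.float32)
x = torch.reshape(x, (1, 1, 5, 5))             # (batch, channel, height, width)
print(MaxPool2d(3, ceil_mode=True)(x).shape)   # torch.Size([1, 1, 2, 2])
print(MaxPool2d(3, ceil_mode=False)(x).shape)  # torch.Size([1, 1, 1, 1])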

Non-linear activation

# example of a non-linear activation: the sigmoid function
import torchvision
from torch.nn import Sigmoid
from torch.utils.data import DataLoader
from torch import nn
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
dataloader = DataLoader(dataset,batch_size=64)

class sigmoid(nn.Module):
    def __init__(self):
        super(sigmoid,self).__init__()
        self.sigmoid1 = Sigmoid()

    def forward(self,input):
        output = self.sigmoid1(input)
        return output

mySigmoid = sigmoid()
writer = SummaryWriter("logs_sigmoid")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input_sigmoid",imgs,global_step=step)
    output = mySigmoid(imgs)
    writer.add_images("output_sigmoid",output,step)
    step = step+1

writer.close()
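As a quick numeric illustration (a minimal, self-contained sketch independent of the dataset above): Sigmoid squashes every input into the range (0, 1), with sigmoid(0) = 0.5.

import torch
from torch.nn import Sigmoid

x = torch.tensor([-2.0, 0.0, 2.0])
print(Sigmoid()(x))  # tensor([0.1192, 0.5000, 0.8808])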

Linear layer

import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("./data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
# drop_last=True keeps every batch at exactly 64 images; a smaller final batch would not flatten to 196608 values and would crash the linear layer below
dataloader = DataLoader(dataset,batch_size=64,drop_last=True)

class Liner(nn.Module):
    def __init__(self):
        super(Liner, self).__init__()
        # 196608 = 64 * 3 * 32 * 32, the length of one fully flattened batch
        self.linear1 = Linear(196608,10)

    def forward(self,input):
        output = self.linear1(input)
        return output

myLinear = Liner()
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)
    output = torch.flatten(imgs)  # flatten the whole batch into a single 1-D tensor
    #output = torch.reshape(imgs,(1,1,1,-1))  # an alternative way to flatten
    print(output.shape)
    output = myLinear(output)
    print(output.shape)
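The printed shapes should line up with the 196608 figure: 64 * 3 * 32 * 32 = 196608, and the linear layer maps that 1-D vector to 10 values. A quick check, reusing myLinear from above:

print(64 * 3 * 32 * 32)        # 196608
x = torch.flatten(torch.ones((64, 3, 32, 32)))
print(x.shape)                 # torch.Size([196608])
print(myLinear(x).shape)       # torch.Size([10])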

Sequential

#Sequential
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui,self).__init__()
        # self.conv1 = Conv2d(3,32,5,padding=2)
        # self.maxpool1 = MaxPool2d(2)
        # self.conv2 = Conv2d(32,32,5,padding=2)
        # self.maxpool2 = MaxPool2d(2)
        # self.conv3 = Conv2d(32,64,5,padding=2)
        # self.maxpool3 = MaxPool2d(2)
        # self.flatten = Flatten()
        # self.linear1 = Linear(1024,64)
        # self.linear2 = Linear(64,10)

        self.model1 = Sequential(
            Conv2d(3,32,5,padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self,x):
        # x = self.conv1(x)
        # x = self.maxpool1(x)
        # x = self.conv2(x)
        # x = self.maxpool2(x)
        # x = self.conv3(x)
        # x = self.maxpool3(x)
        # x = self.flatten(x)
        # x = self.linear1(x)
        # x = self.linear2(x)

        x = self.model1(x)
        return x

tudui = Tudui()
input = torch.ones((64,3,32,32))
output = tudui(input)

# visualize how the network processes the input (view the graph in TensorBoard)
writer = SummaryWriter("logs_seq")
writer.add_graph(tudui,input)
writer.close()
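Why Linear(1024, 64): each Conv2d above uses a 5x5 kernel with padding=2, so the 32x32 spatial size is preserved, and each MaxPool2d(2) halves it, 32 -> 16 -> 8 -> 4; after the last pooling there are 64 channels of 4x4 feature maps, i.e. 64 * 4 * 4 = 1024 values per image once flattened. A quick check on the model built above:

print(64 * 4 * 4)    # 1024, the in_features of the first Linear layer
print(output.shape)  # torch.Size([64, 10]): one 10-class score vector per image in the batch of 64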