Some thoughts on PyTorch (continuously updated)

This post walks through building neural networks in PyTorch for image classification on the CIFAR10 dataset, covering data preprocessing, model definition, training, and testing. It demonstrates in practice how to use a convolutional neural network (CNN) to tackle an image classification problem.
# A model for the image classification problem
# reference: https://blog.csdn.net/m0_37306360/article/details/79312334
import torch
import torchvision
import torchvision.transforms as transforms

# define an image transform pipeline
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
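
# A quick sanity check of the transform (a minimal sketch): ToTensor maps
# pixels to [0, 1], and Normalize computes (x - mean) / std per channel,
# so mean=std=0.5 maps the range to [-1, 1].
_norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
print(_norm(torch.zeros(3, 2, 2)).min().item(),  # -1.0
      _norm(torch.ones(3, 2, 2)).max().item())   # 1.0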

# load the CIFAR10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)

# dataset (Dataset) – the dataset to load from.
# batch_size (int, optional) – how many samples per batch (default: 1).
# shuffle (bool, optional) – if True, reshuffles the data every epoch (default: False).
# sampler (Sampler, optional) – strategy for drawing samples; if given, shuffle is ignored.
# num_workers (int, optional) – number of subprocesses for loading; 0 loads in the main process (default: 0).
# collate_fn (callable, optional) – merges a list of samples into a mini-batch.
# pin_memory (bool, optional) – if True, copies tensors into pinned memory, speeding up transfer to GPU.
# drop_last (bool, optional) – if True, drops the last incomplete batch when the dataset size
# is not divisible by the batch size; if False, the last batch is simply smaller (default: False).
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=1)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=1)
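
# With batch_size=4 the 50,000 training images yield 12,500 batches per epoch;
# since drop_last defaults to False, an uneven split would just leave a smaller
# final batch. A quick check (sizes assume the standard CIFAR10 split):
print(len(trainset), len(trainloader))  # 50000 12500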

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
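
# Each dataset item is an (image_tensor, label_index) pair, and classes maps
# the index back to a name. A minimal sketch:
_img0, _label0 = trainset[0]
print(_img0.shape, classes[_label0])  # torch.Size([3, 32, 32]) and the class name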

import matplotlib.pyplot as plt
import numpy as np


# function to show an image
def imgshow(img):
    img = img / 2 + 0.5  # unnormalize: inverts (x - 0.5) / 0.5 back to [0, 1]
    plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))  # CHW -> HWC for matplotlib
    plt.show()


# CIFAR10 inputs are 3*32*32
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # training batches: images [4, 3, 32, 32], labels [4]
        # MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
        self.pool = nn.MaxPool2d(2, 2)

        # Conv2d(in_channels, out_channels, kernel_size)
        # Shape walk-through (no padding, so each conv shrinks the map by kernel_size - 1):
        # [4, 3, 32, 32] -conv1(3, 6, 4): 32-4+1=29-> [4, 6, 29, 29] -relu-pool(2,2)-> [4, 6, 14, 14]
        # -conv2(6, 16, 5): 14-5+1=10-> [4, 16, 10, 10] -relu-pool(2,2)-> [4, 16, 5, 5]
        # -flatten into the Linear layers-> [4, 16*5*5] -> [4, 120] -> [4, 84] -> [4, 10]
        self.conv1 = nn.Conv2d(3, 6, 4)
        self.conv2 = nn.Conv2d(6, 16, 5)

        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # flatten for the fully connected layers; np.reshape() and view() do
        # the same job: reshape() works on ndarrays, view() on tensors
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # return raw logits: CrossEntropyLoss below applies log-softmax
        # internally, so adding F.softmax here would be wrong
        x = self.fc3(x)
        return x


# our model
net = Net()
# end of model definition
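
# Verifying the shape walk-through in Net.__init__ with a dummy batch
# (a minimal sketch, not part of training):
_x = torch.randn(4, 3, 32, 32)
print(net.conv1(_x).shape)                     # [4, 6, 29, 29]
_p1 = net.pool(F.relu(net.conv1(_x)))
print(_p1.shape)                               # [4, 6, 14, 14]
print(net.pool(F.relu(net.conv2(_p1))).shape)  # [4, 16, 5, 5]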

# Define loss (Cross-Entropy)
import torch.optim as optim

loss_func = nn.CrossEntropyLoss()
# SGD with momentum
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
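
# CrossEntropyLoss applies log-softmax internally, which is why forward()
# returns raw logits. A quick identity check (a minimal sketch):
_logits = torch.randn(4, 10)
_labels = torch.randint(0, 10, (4,))
assert torch.allclose(loss_func(_logits, _labels),
                      F.nll_loss(F.log_softmax(_logits, dim=1), _labels))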

if __name__ == '__main__':
    # show the img
    # get some random training images
    dataiter = iter(trainloader)
    images, labels = next(dataiter)  # dataiter.next() no longer exists in Python 3
    # torch.Size([4, 3, 32, 32]) torch.Size([4])
    print(images.shape, labels.shape)
    # show images; make_grid (default padding=2) gives torch.Size([3, 36, 138])
    imgshow(torchvision.utils.make_grid(images))
    # print labels
    print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

    # Train the network
    for epoch in range(5):
        running_loss = 0.0
        # enumerate() yields (batch index, batch) pairs
        for i, data in enumerate(trainloader):
            # get the inputs
            inputs, labels = data
            # torch.Size([4, 3, 32, 32]) torch.Size([4])

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass (calls Net.forward)
            outputs = net(inputs)
            # loss
            loss = loss_func(outputs, labels)
            # backward
            loss.backward()
            # update weights
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print("Finished Training")

    print("Beginning Testing")
    correct = 0
    total = 0
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()

    print('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))
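
    # Breaking the score down per class often shows which categories the
    # network confuses; a sketch along the lines of the official tutorial:
    class_correct = [0.0] * 10
    class_total = [0.0] * 10
    with torch.no_grad():
        for images, labels in testloader:
            _, predicted = torch.max(net(images), 1)
            for label, pred in zip(labels, predicted):
                class_correct[label] += (pred == label).item()
                class_total[label] += 1
    for i in range(10):
        print('Accuracy of %5s : %2d %%' % (
            classes[i], 100 * class_correct[i] / class_total[i]))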

# https://www.cnblogs.com/icodeworld/p/11529704.html
# ways of building a model
# a model for the image classification problem
# https://blog.csdn.net/m0_37306360/article/details/79312334 参考
import torch
import torchvision
import torchvision.transforms as transforms

# define an image transform pipeline
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# load the CIFAR10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)

# DataLoader parameters: see the notes above the first trainloader
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=1)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=1)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

import matplotlib.pyplot as plt
import numpy as np


# functions to show an image
def imgshow(img):
    img = img / 2 + 0.5  # unnormalize
    plt.imshow(np.transpose(img, (1, 2, 0)))
    plt.show()


import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # same architecture as the first listing; see the shape walk-through there
        self.pool = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(3, 6, 4)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # raw logits: CrossEntropyLoss applies log-softmax itself
        return x


net = Net()
import torch.optim as optim

loss_func = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)


def train(epoch):
    # enumerate() yields (batch index, batch) pairs
    for i, data in enumerate(trainloader):
        # get the inputs
        inputs, labels = data
        # torch.Size([4, 3, 32, 32]) torch.Size([4])

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward pass
        outputs = net(inputs)
        # loss
        loss = loss_func(outputs, labels)
        # backward
        loss.backward()
        # update weights
        optimizer.step()

        # print progress every 200 mini-batches
        if i % 200 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, i * len(inputs), len(trainloader.dataset),
                       100. * i / len(trainloader), loss.item()))



def test():
    test_loss = 0
    correct = 0
    # evaluate on the test set; no gradients needed
    with torch.no_grad():
        for data, target in testloader:
            output = net(data)
            # the net returns raw logits, so cross-entropy is the right loss here;
            # reduction='sum' makes the division below a true per-sample mean
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            # index of the max logit = predicted class
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(testloader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(testloader.dataset),
        100. * correct / len(testloader.dataset)))
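
# A side note on the prediction line above (a minimal sketch):
# output.max(1, keepdim=True)[1] is the argmax over the class dimension,
# and torch.argmax is the equivalent modern spelling.
_o = torch.randn(4, 10)
assert (_o.max(1, keepdim=True)[1] == _o.argmax(dim=1, keepdim=True)).all()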


if __name__ == '__main__':
    for epoch in range(1, 20):
        train(epoch)
        test()

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# Training settings
batch_size = 64

# MNIST Dataset
train_dataset = datasets.MNIST(root='./data/',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='./data/',
                              train=False,
                              transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = nn.Linear(784, 520)
        self.l2 = nn.Linear(520, 320)
        self.l3 = nn.Linear(320, 240)
        self.l4 = nn.Linear(240, 120)
        self.l5 = nn.Linear(120, 10)

    def forward(self, x):
        # Flatten the data (n, 1, 28, 28) --> (n, 784)
        x = x.view(-1, 784)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return F.log_softmax(self.l5(x), dim=1)
        # return self.l5(x)


model = Net()
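
# The five fully connected layers above hold roughly 0.68M parameters
# (784*520 + 520*320 + 320*240 + 240*120 + 120*10 weights plus biases);
# a one-line check:
print(sum(p.numel() for p in model.parameters()), "parameters")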

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


def train(epoch):
    # loop over mini-batches; batch_idx counts batches, data is [64, 1, 28, 28]
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # target is a batch of digit labels, e.g. tensor([7, 4, 0, 1, ...])
        loss = F.nll_loss(output, target)
        loss.backward()
        # update
        optimizer.step()
        if batch_idx % 200 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


def test():
    test_loss = 0
    correct = 0
    # evaluate on the test set; no gradients needed
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            # sum up batch loss (reduction='sum' so the division below gives a true mean)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # index of the max log-probability = predicted digit
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


for epoch in range(1, 6):
    train(epoch)
    test()

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import numpy as np

import matplotlib.pyplot as plt

# Training settings
batch_size = 64

# MNIST Dataset
train_dataset = datasets.MNIST(root='./data/',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='./data/',
                              train=False,
                              transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

# batch_size=1 so evaluate() below scores one image at a time
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=1,
                                          shuffle=True)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = nn.Linear(784, 520)
        self.l2 = nn.Linear(520, 320)
        self.l3 = nn.Linear(320, 240)
        self.l4 = nn.Linear(240, 120)
        self.l5 = nn.Linear(120, 10)

    def forward(self, x):
        # Flatten the data (n, 1, 28, 28) --> (n, 784)
        x = x.view(-1, 784)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return F.log_softmax(self.l5(x), dim=1)
        # return self.l5(x)


model = Net()

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


def train(epoch):
    # loop over mini-batches; batch_idx counts batches, data is [64, 1, 28, 28]
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        optimizer.zero_grad()
        output = model(data)
        # target is a batch of digit labels, e.g. tensor([7, 4, 0, 1, ...])
        loss = F.nll_loss(output, target)
        loss.backward()
        # update
        optimizer.step()
        if batch_idx % 200 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


def test():
    test_loss = 0
    correct = 0
    # evaluate on the test set (batch_size=1 here, so data is [1, 1, 28, 28])
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)  # [1, 10]
            # sum up batch loss (reduction='sum' for a true mean after division)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


# function to show an image (MNIST tensors are already in [0, 1], so no
# unnormalize step is needed here)
def imgshow(img):
    plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
    plt.show()


def evaluate():
    dataiter = iter(test_loader)
    images, labels = next(dataiter)  # one sample: [1, 1, 28, 28]
    print(images.shape)
    output = model(images)
    # output shape: torch.Size([1, 10])
    print("output shape:", output.shape)

    pred = output.max(1, keepdim=True)[1].item()

    print(pred)
    imgshow(torchvision.utils.make_grid(images))


# for epoch in range(1, 6):
#     train(epoch)
#     test()
# torch.save(model, 'model.pkl')

# loading a fully pickled model requires the Net class above to be in scope
model = torch.load('model.pkl')
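
# A more robust alternative (a sketch; 'model_state.pkl' is a hypothetical
# file name): save only the state_dict, which does not pickle the class itself.
# torch.save(model.state_dict(), 'model_state.pkl')
# model = Net()
# model.load_state_dict(torch.load('model_state.pkl'))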
# PIL
from PIL import Image

# Image path
# im_path = "./data/one.png"

# 1. path
# im1 = Image.open(im_path)
# print(' From image path {}'.format(im1))
# image_size = im1.resize((28, 28), Image.ANTIALIAS)
# image_black_white = image_size.convert('1')
#
# img = np.array(image_black_white)
# img = np.expand_dims(img, 0).repeat(1, axis=0)
# img = np.expand_dims(img, 0).repeat(1, axis=0)
# img = torch.from_numpy(img)
# print(img.shape)

evaluate()

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import numpy as np

import matplotlib.pyplot as plt

# Training settings
batch_size = 64

# MNIST Dataset
train_dataset = datasets.MNIST(root='./data/',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='./data/',
                              train=False,
                              transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

# batch_size=1 so evaluate() below scores one image at a time
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=1,
                                          shuffle=True)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = nn.Linear(784, 520)
        self.l2 = nn.Linear(520, 320)
        self.l3 = nn.Linear(320, 240)
        self.l4 = nn.Linear(240, 120)
        self.l5 = nn.Linear(120, 10)

    def forward(self, x):
        # Flatten the data (n, 1, 28, 28) --> (n, 784)
        x = x.view(-1, 784)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return F.log_softmax(self.l5(x), dim=1)
        # return self.l5(x)


model = Net()

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


def train(epoch):
    # loop over mini-batches; batch_idx counts batches, data is [64, 1, 28, 28]
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # target is a batch of digit labels, e.g. tensor([7, 4, 0, 1, ...])
        loss = F.nll_loss(output, target)
        loss.backward()
        # update
        optimizer.step()
        if batch_idx % 200 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


def test():
    test_loss = 0
    correct = 0
    # evaluate on the test set (batch_size=1 here, so data is [1, 1, 28, 28])
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)  # [1, 10]
            # sum up batch loss (reduction='sum' for a true mean after division)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


# functions to show an image
def imgshow(img):
    # [3, 28, 28]
    img = np.transpose(img, (1, 2, 0))
    # print(img.shape)
    plt.imshow(img)
    plt.show()


def evaluate(img):
    output = model(img)
    # output shape: torch.Size([1, 10])
    print("output shape:", output.shape)

    pred = output.max(1, keepdim=True)[1].item()
    print(pred)
    # img is [1, 1, 28, 28] with values in [0, 1]
    imgshow(torchvision.utils.make_grid(img))


# for epoch in range(1, 6):
#     train(epoch)
#     test()
# torch.save(model, 'model.pkl')

# loading a fully pickled model requires the Net class above to be in scope
model = torch.load('model.pkl')
# PIL
from PIL import Image

# Image path
im_path = "./data/nine.jpg"

# load and preprocess a hand-drawn digit to match the MNIST input format
img = Image.open(im_path)
img = img.resize((28, 28))
img = img.convert('L')  # grayscale: the model expects a single channel

# invert (MNIST digits are light on dark) and scale to [0, 1]
img = np.array(img, dtype=np.float32)
img = 255 - img
img = img / 255

# add batch and channel dims: [28, 28] -> [1, 1, 28, 28]
img = torch.from_numpy(img).unsqueeze(0).unsqueeze(0)
print(img.shape, "shape")
evaluate(img)
