My computer burned out once before, and I only replaced the motherboard and cooler this year, so when I saw CPU usage hit 100% I didn't dare run too much data. I limited it to the first 1000 test images, 4 per batch (so 250 batches in total); in any case, I shrank both the dataset and the batch size.
The final accuracy of 13% surprised me a bit, since these aren't weights I trained myself; I downloaded the pretrained model directly.
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
from torchvision.models import vgg16
# Load the MNIST dataset and preprocess it
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # resize to the 224x224 input size VGG-16 expects
    transforms.Grayscale(num_output_channels=3),  # replicate the single grayscale channel to 3 channels
    transforms.ToTensor(),
    # Note: the pretrained VGG-16 backbone was trained with ImageNet statistics
    # (mean [0.485, 0.456, 0.406], std [0.229, 0.224, 0.225]), not 0.5/0.5
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
subset_indices = range(1000) # use only the first 1000 test images as a subset
subset_sampler = torch.utils.data.SubsetRandomSampler(subset_indices) # draws those 1000 indices in random order
testloader = torch.utils.data.DataLoader(testset, batch_size=4, sampler=subset_sampler, num_workers=0)
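# For reference, an equivalent deterministic way to take the same subset
# (an alternative, not used here):
# testloader = torch.utils.data.DataLoader(torch.utils.data.Subset(testset, range(1000)),
#                                          batch_size=4, num_workers=0)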
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load VGG-16 pretrained on ImageNet
# (pretrained=True is deprecated in torchvision >= 0.13; weights=VGG16_Weights.DEFAULT is the newer form)
model = vgg16(pretrained=True)
# Replace the first conv layer; its shape (3 in, 64 out) matches the original,
# so this swap only discards that layer's pretrained weights
model.features[0] = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
# Replace the final fully connected layer so it outputs 10 classes (MNIST digits)
model.classifier[6] = nn.Linear(4096, 10)
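# Note: both replaced layers start from random initialization, so without any
# fine-tuning on MNIST the classifier head is untrained and accuracy should
# land near the 10% chance level for 10 classes.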
model.to(device)
model.eval() # switch to evaluation mode (disables dropout)
# running counts for computing accuracy over the subset
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)  # index of the highest logit is the predicted class
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
print('Accuracy of the VGG-16 model on the MNIST test images: %.2f %%' % accuracy)
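For what it's worth, 13% is close to chance level (10 classes, so random guessing gives about 10%): the two replaced layers start with random weights, so the pretrained backbone feeds into an untrained head. Below is a minimal fine-tuning sketch; the training subset size (4000), learning rate, momentum, and single epoch are illustrative assumptions, not tuned values.

import torch.optim as optim

# Fine-tune on a small MNIST training subset so the replaced layers learn the digits
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(
    torch.utils.data.Subset(trainset, range(4000)),  # small subset to keep CPU load manageable
    batch_size=4, shuffle=True, num_workers=0)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)  # assumed hyperparameters

model.train()
for epoch in range(1):  # even one pass should lift accuracy well above chance
    for images, labels in trainloader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
model.eval()  # back to evaluation mode before re-running the test loop above

Alternatively, since the replacement for model.features[0] has the same shape as the original layer, you could skip that replacement entirely, freeze the backbone, and train only the classifier parameters, which is much lighter on a CPU.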