import torch
import torch.nn as nn
import torchvision.models as models
import matplotlib.pyplot as plt
import torchvision.datasets
import torchvision.transforms as transforms
from collections.abc import Iterable
# 检测是否可以使用GPU
from torch.utils.data import DataLoader
# Pick the compute device: prefer the first CUDA GPU, fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
print(device)
import os
# Work around the "duplicate OpenMP runtime" crash that can occur when
# torch and other native libraries each bundle their own libiomp.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
cuda_is_available = torch.cuda.is_available()

# Hyperparameters
BATCHSIZE = 100  # samples per mini-batch
EPOCHS = 15      # full passes over the training set
LR = 0.001       # optimizer learning rate
# 对数据进行处理
# Preprocessing pipeline applied to every CIFAR-10 image.
# NOTE(review): transforms.Resize(256) was deliberately left out — the original
# author was unsure whether resizing is needed; confirm against the model's
# expected input size before training.
_transform_steps = [
    transforms.ToTensor(),                                   # HWC image -> CHW float tensor in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # map each channel to roughly [-1, 1]
]
transform = transforms.Compose(_transform_steps)
# Load the CIFAR-10 train/test splits (downloaded to ./dataset on first run).
train_data = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=transform, download=True)
# Fix: the original test_data line was truncated mid-statement ("train=Fals");
# completed to mirror the training split with train=False.
test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=transform, download=True)
# VGG16_GPU
# (CSDN page footer captured during extraction, not code:
#  "Latest recommended article published 2024-02-19 20:55:09")