提示:文章写完后,目录可以自动生成,如何生成可参考右边的帮助文档
前言
关于GPU占用率低的问题,两部分代码都在下面了,不知道哪里有问题,大家帮我看看。`num_workers`、`pin_memory` 这些参数都试过了,没有效果。
一、Dataloader
代码如下:
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision import transforms
import os
import torch
import matplotlib.pyplot as plt
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class MyDataset(Dataset):
    """Paired image dataset: intensity images as inputs, phase screens as labels.

    Items are paired positionally: input_files[i] is matched with
    label_files[i], so both directories must contain files that sort
    into the same order (e.g. identical filenames).
    """

    def __init__(self, input_root, label_root, transform=None):
        self.input_root = input_root
        # BUG FIX: os.listdir returns entries in arbitrary, OS-dependent
        # order, so the input image and its label could be mis-paired.
        # Sorting both lists makes the positional pairing deterministic.
        self.input_files = sorted(os.listdir(input_root))
        self.label_root = label_root
        self.label_files = sorted(os.listdir(label_root))
        self.transforms = transform

    def __len__(self):
        # Dataset size = number of input images.
        return len(self.input_files)

    def __getitem__(self, index):
        # Load the index-th input/label pair as single-channel ('L') images.
        input_img_path = os.path.join(self.input_root, self.input_files[index])
        input_img = Image.open(input_img_path).convert('L')
        label_img_path = os.path.join(self.label_root, self.label_files[index])
        label_img = Image.open(label_img_path).convert('L')
        if self.transforms:
            # Apply the same transform pipeline to both images so they
            # stay spatially aligned after resizing.
            input_img = self.transforms(input_img)
            label_img = self.transforms(label_img)
        return input_img, label_img
# --- Dataset roots (train / validation / test splits) ---
# NOTE(review): 'trianinput_root' is a typo for 'train...' but is kept,
# since other modules star-import this file and may reference the name.
trianinput_root = 'E:/turbulence compensation/Matlab_Data/LJM_data/all_I_intensity/train_70'
trainlabel_root = 'E:/turbulence compensation/Matlab_Data/LJM_data/all_phase_screens/train_70'
validinput_root = 'E:/turbulence compensation/Matlab_Data/LJM_data/all_I_intensity/vali_20'
validlabel_root = 'E:/turbulence compensation/Matlab_Data/LJM_data/all_phase_screens/vali_20'
testinput_root = 'E:/turbulence compensation/Matlab_Data/LJM_data/all_I_intensity/test_10'
testlabel_root = 'E:/turbulence compensation/Matlab_Data/LJM_data/all_phase_screens/test_10'

# Shared preprocessing: resize to 64x64 and convert to a [0, 1] tensor.
transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
])

train_dataset = MyDataset(trianinput_root, trainlabel_root, transform)
valid_dataset = MyDataset(validinput_root, validlabel_root, transform)
test_dataset = MyDataset(testinput_root, testlabel_root, transform)

# PERF: the default num_workers=0 loads and decodes every image in the
# training process itself, so the GPU sits idle waiting for data — the
# classic cause of low GPU utilization. Worker processes overlap disk
# I/O/decoding with compute, and pin_memory=True enables faster (and
# async) host->GPU copies.
# NOTE(review): on Windows, num_workers > 0 requires the code that
# iterates these loaders to live under `if __name__ == '__main__':` —
# confirm the training script is guarded before raising this.
train_dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True,
                              num_workers=4, pin_memory=True)
valid_dataloader = DataLoader(valid_dataset, batch_size=128, shuffle=False,
                              num_workers=4, pin_memory=True)
test_dataloader = DataLoader(test_dataset, batch_size=2, shuffle=False)
if __name__ == '__main__':
    # Smoke test: print the shape of each batch.
    # BUG FIX: the dataset yields (input, label) pairs, so each batch is a
    # 2-tuple; the previous 4-way unpack raised ValueError at runtime and
    # also shadowed the builtins `range` and the `np` alias.
    for inputs, labels in train_dataloader:
        print(inputs.shape, labels.shape)
二、train部分代码
代码如下:
import torch.optim as optim
import random
from DataLoader import *
from Network import *
from torch.utils.tensorboard import SummaryWriter
# TensorBoard logger for train/valid loss curves.
writer = SummaryWriter('runs/experiment_name')

# Reproducibility: seed every RNG the pipeline can touch.
seed = 42
random.seed(seed)  # FIX: `random` was imported but never seeded
torch.manual_seed(seed)
if torch.cuda.is_available():
    # A plain `if` instead of a conditional expression used for its side
    # effect; seed all visible GPUs, not just the current one.
    torch.cuda.manual_seed_all(seed)
np.random.seed(seed)  # `np` comes from the star imports above

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

model = CustomCNN().to(device)
criterion = CustomLoss(a=2.0).to(device)  # a: loss weighting factor — see Network.py
optimizer = optim.Adam(model.parameters(), lr=0.001)

num_epochs = 100
for epoch in range(1, num_epochs + 1):
    # ---- training pass ----
    model.train()
    running_loss = 0.0
    for inputs, labels in train_dataloader:
        # non_blocking=True lets the host->GPU copy overlap with compute
        # (only effective when the DataLoader uses pin_memory=True).
        inputs = inputs.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    train_loss = running_loss / len(train_dataloader)
    print('=====train each epoch all loss {}:'.format(epoch), train_loss)
    writer.add_scalar('Loss/train', train_loss, epoch)

    # ---- validation pass (no gradients) ----
    model.eval()
    with torch.no_grad():
        valid_loss = 0.0
        for inputs, labels in valid_dataloader:
            inputs = inputs.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)
            outputs = model(inputs)
            valid_loss += criterion(outputs, labels).item()
    valid_loss_every_epochaverage = valid_loss / len(valid_dataloader)
    print('-----valid each epoch all loss {}:'.format(epoch), valid_loss_every_epochaverage)
    writer.add_scalar('Loss/Valid', valid_loss_every_epochaverage, epoch)

    # Periodic checkpoint every 50 epochs.
    # BUG FIX: create the directory first, otherwise torch.save raises
    # FileNotFoundError on a fresh checkout. (`os` is in scope via the
    # star import of DataLoader.)
    if epoch % 50 == 0:
        os.makedirs('./save_model', exist_ok=True)
        save_path = f'./save_model/com_epoch_{epoch}.pth'
        torch.save(model.state_dict(), save_path)

writer.close()