Deep Learning: the Dataset Class

The ants/bees classification dataset (hymenoptera_data) can be downloaded from https://download.pytorch.org/tutorial/hymenoptera_data.zip
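For convenience, the archive can be fetched and unpacked with the Python standard library. A minimal sketch, assuming the extracted `train`/`val` folders are then moved under a local `dataset/` directory to match the paths used below (the folder layout after renaming is an assumption, not part of the original post):

```python
import os
import urllib.request
import zipfile

URL = "https://download.pytorch.org/tutorial/hymenoptera_data.zip"
ARCHIVE = "hymenoptera_data.zip"

# Download the zip once (skipped if it is already present)
if not os.path.exists(ARCHIVE):
    urllib.request.urlretrieve(URL, ARCHIVE)

# Unpack; the archive contains hymenoptera_data/train/{ants,bees} and
# hymenoptera_data/val/{ants,bees}. The examples below assume these
# train/val folders were moved (or symlinked) to dataset/train and dataset/val.
with zipfile.ZipFile(ARCHIVE) as zf:
    zf.extractall(".")
```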

To build a custom dataset, subclass `torch.utils.data.Dataset` and implement `__getitem__` and `__len__`:

```python
from torch.utils.data import Dataset
from PIL import Image
import os

# Subclass Dataset
class MyData(Dataset):
    # The constructor stores the paths used by the rest of the class
    def __init__(self, root_dir, label_dir):
        self.root_dir = root_dir
        self.label_dir = label_dir
        # Build the image folder path, e.g. 'dataset/train\\ants'
        self.path = os.path.join(self.root_dir, self.label_dir)
        # List every file name in that folder, e.g. self.img_path[0] == '0013035.jpg'
        self.img_path = os.listdir(self.path)

    def __getitem__(self, idx):
        img_name = self.img_path[idx]
        # Full path of one image
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        # Read the image
        img = Image.open(img_item_path)
        label = self.label_dir
        return img, label

    def __len__(self):
        # Number of images in the folder
        return len(self.img_path)

root_dir = "dataset/train"
ants_label_dir = "ants"
bees_label_dir = "bees"
ants_dataset = MyData(root_dir, ants_label_dir)
bees_dataset = MyData(root_dir, bees_label_dir)
# image, label = ants_dataset[0]; image.show()  # display the first image
# Adding two Dataset objects concatenates them into one dataset
train_dataset = ants_dataset + bees_dataset
```
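A quick usage check, assuming the data has been extracted to `dataset/train/ants` and `dataset/train/bees` as above. Note that `__getitem__` returns a PIL image plus a string label, and that `+` on two `Dataset` objects produces a `ConcatDataset`:

```python
from torch.utils.data import ConcatDataset

print(len(ants_dataset), len(bees_dataset), len(train_dataset))  # ant count, bee count, their sum

img, label = train_dataset[0]   # the first samples come from ants_dataset
print(type(img), label)         # a PIL image object and the string 'ants'

# '+' on Dataset objects is shorthand for ConcatDataset
assert isinstance(train_dataset, ConcatDataset)
img.show()                      # open the image in the default viewer
```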
Below is an example of a deep-learning image-classification pipeline in Python:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, ToTensor, Resize, Normalize
from torchvision.models import resnet50

# Set the random seed
torch.manual_seed(42)

# Define the preprocessing/augmentation transforms
transform = Compose([
    Resize((224, 224)),
    ToTensor(),
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Load the training and validation sets
train_dataset = ImageFolder('train_data_path', transform=transform)
val_dataset = ImageFolder('val_data_path', transform=transform)

# Create the data loaders
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32)

# Define the model
num_classes = 2  # number of target classes
model = resnet50(pretrained=True)
model.fc = nn.Linear(2048, num_classes)  # replace the final fully connected layer

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train the model
num_epochs = 10
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

for epoch in range(num_epochs):
    model.train()
    train_loss = 0.0
    train_correct = 0
    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        outputs = model(images)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        train_loss += loss.item() * images.size(0)
        train_correct += torch.sum(preds == labels.data)

    train_loss = train_loss / len(train_dataset)
    train_acc = train_correct.double() / len(train_dataset)

    model.eval()
    val_loss = 0.0
    val_correct = 0
    with torch.no_grad():
        for images, labels in val_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)

            val_loss += loss.item() * images.size(0)
            val_correct += torch.sum(preds == labels.data)

    val_loss = val_loss / len(val_dataset)
    val_acc = val_correct.double() / len(val_dataset)

    print(f'Epoch {epoch+1}/{num_epochs}, Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, '
          f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}')

# Use the trained model for prediction
test_dataset = ImageFolder('test_data_path', transform=transform)
test_loader = DataLoader(test_dataset, batch_size=32)

model.eval()
predictions = []
with torch.no_grad():
    for images, _ in test_loader:
        images = images.to(device)
        outputs = model(images)
        _, preds = torch.max(outputs, 1)
        predictions.extend(preds.cpu().numpy())

# Print the predictions
class_names = train_dataset.classes
for i, pred in enumerate(predictions):
    print(f'Image {i+1}: {class_names[pred]}')
```
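To connect this with the hymenoptera folders used earlier, the 'train_data_path' and 'val_data_path' placeholders would point at `dataset/train` and `dataset/val`; `ImageFolder` infers labels from the `ants`/`bees` sub-folder names, so `num_classes` is 2. A small sketch under those assumptions (the paths are mine, not the original author's):

```python
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, Resize, ToTensor, Normalize

# Assumed layout: dataset/train/{ants,bees} and dataset/val/{ants,bees}
transform = Compose([
    Resize((224, 224)),
    ToTensor(),
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

train_dataset = ImageFolder("dataset/train", transform=transform)
val_dataset = ImageFolder("dataset/val", transform=transform)

print(train_dataset.classes)        # ['ants', 'bees']
print(train_dataset.class_to_idx)   # {'ants': 0, 'bees': 1}
```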
