Loading the Fashion-MNIST Binary Files Directly into PyTorch (Offline)

Overview: load the four Fashion-MNIST binary .gz files from local disk directly into PyTorch, without downloading them through torchvision.


'''
Load the four .gz files directly into PyTorch for training:
    t10k-images-idx3-ubyte.gz
    t10k-labels-idx1-ubyte.gz
    train-images-idx3-ubyte.gz
    train-labels-idx1-ubyte.gz
'''

import os
import numpy as np
import gzip
import matplotlib.pyplot as plt

import torch
import torch.utils.data as Data
from torchvision import transforms



dataPath = 'E:/fashion_binary_gz/'  # local directory containing the four .gz files

# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

batch_size = 4  # number of sample images to visualize below (not the DataLoader batch size)


def load_data(data_folder, data_name, label_name):
    """
        data_folder: directory containing the .gz files
        data_name:   image file name
        label_name:  label file name
    """
    with gzip.open(os.path.join(data_folder, label_name), 'rb') as lbpath:  # 'rb' reads the file as binary
        labels = np.frombuffer(lbpath.read(), np.uint8, offset=8)   # skip the 8-byte IDX header (see the sketch after this function)

    with gzip.open(os.path.join(data_folder, data_name), 'rb') as imgpath:
        images = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(labels), 28, 28)  # skip the 16-byte IDX header
    return (images, labels)
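

# The offsets used above (8 for labels, 16 for images) correspond to the IDX file headers.
# The helper below is a minimal, optional sketch (not part of the original post) that parses
# those headers with the standard struct module: label files start with a magic number and an
# item count (2 x 4 bytes), image files add the row and column counts (4 x 4 bytes in total),
# all stored as big-endian 32-bit integers. The import is placed here only to keep the sketch
# self-contained.
import struct

def inspect_idx_header(path):
    """Print the header fields of a gzipped MNIST/Fashion-MNIST IDX file (hypothetical helper)."""
    with gzip.open(path, 'rb') as f:
        magic, count = struct.unpack('>II', f.read(8))
        if magic == 2051:                                   # 0x00000803: image file
            rows, cols = struct.unpack('>II', f.read(8))
            print(f'{path}: {count} images of {rows}x{cols}')
        elif magic == 2049:                                 # 0x00000801: label file
            print(f'{path}: {count} labels')

# Example (commented out):
# inspect_idx_header(os.path.join(dataPath, 'train-images-idx3-ubyte.gz'))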



class DealDataset(Data.Dataset):
    """
        Dataset that reads and holds the Fashion-MNIST data.
    """
    def __init__(self, folder, data_name, label_name, transform=None):
        # If the arrays had previously been cached with torch.save(), torch.load() could be used
        # here instead and would return torch.Tensor objects (see the sketch after this class).
        (train_set, train_labels) = load_data(folder, data_name, label_name)
        self.train_set = train_set
        self.train_labels = train_labels
        self.transform = transform

    def __getitem__(self, index):

        img, target = self.train_set[index], int(self.train_labels[index])
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.train_set)
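

# Optional caching sketch (not in the original post): the numpy arrays returned by load_data()
# can be converted to tensors once and written out with torch.save(); later runs can then call
# torch.load() instead of re-parsing the .gz files, as mentioned in the comment inside __init__.
# The file name 'fashion_cache.pt' is an arbitrary example.
def cache_as_tensors(folder, data_name, label_name, out_path='fashion_cache.pt'):
    images, labels = load_data(folder, data_name, label_name)
    torch.save({'images': torch.from_numpy(images.copy()),   # .copy(): frombuffer arrays are read-only
                'labels': torch.from_numpy(labels.copy())},
               out_path)

def load_cached_tensors(path='fashion_cache.pt'):
    cached = torch.load(path)
    return cached['images'], cached['labels']                # both are torch.Tensor (uint8)

# Example (commented out):
# cache_as_tensors(dataPath, "train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz")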


# Instantiate the Dataset class, then pass the resulting Dataset objects to DataLoader below
# (a quick sanity check follows the two dataset instances).
trainDataset = DealDataset(dataPath,
                           "train-images-idx3-ubyte.gz",
                           "train-labels-idx1-ubyte.gz",
                           transform=transforms.ToTensor())

testDataset = DealDataset(dataPath,
                          "t10k-images-idx3-ubyte.gz",
                          "t10k-labels-idx1-ubyte.gz",
                          transform=transforms.ToTensor())

# Wrap the training and test datasets in DataLoaders
train_loader = Data.DataLoader(
    dataset=trainDataset,
    batch_size=100,  # each batch can be thought of as a bundle of 100 images
    shuffle=False,   # kept False so the visualization below is deterministic; use True for real training
)

test_loader = Data.DataLoader(
    dataset=testDataset,
    batch_size=100,
    shuffle=False,
)


if __name__ == '__main__':


    # trainDataset exposes the raw data as attributes (train_labels, train_set), both numpy ndarrays
    print(f'trainDataset.train_labels.shape:{trainDataset.train_labels.shape}\n')
    print(f'trainDataset.train_set.shape:{trainDataset.train_set.shape}\n')


    # train_loader exposes batch_size (int) and dataset (the DealDataset instance),
    # whose train_labels / train_set attributes are the same numpy ndarrays
    print(f'train_loader.batch_size: {train_loader.batch_size}\n')
    print(f'train_loader.dataset.train_labels.shape: {train_loader.dataset.train_labels.shape}\n')
    print(f'train_loader.dataset.train_set.shape: {train_loader.dataset.train_set.shape}\n')



    dataiter = iter(train_loader)
    images, labels = next(dataiter)  # dataiter.next() was removed in newer PyTorch versions
    images = images.numpy()

    classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    # plot the first batch_size images in the batch, along with their class labels
    fig = plt.figure(figsize=(25, 4))
    for idx in np.arange(batch_size):
        ax = fig.add_subplot(2, batch_size // 2, idx + 1, xticks=[], yticks=[])  # add_subplot needs integer arguments
        ax.imshow(np.squeeze(images[idx]), cmap='gray')
        ax.set_title(classes[labels[idx]])
    plt.show()

 

Output

The script prints the dataset and loader shapes, then displays a grid of sample images with their class labels.

A complete classification example

For reference, here is a full example of classifying the Fashion-MNIST dataset with PyTorch. First, import the required libraries and modules:

```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
```

Next, download and load the dataset. Fashion-MNIST can be downloaded as follows (alternatively, the offline train_loader / test_loader built above from the local .gz files can be substituted, since they yield the same image tensors and integer labels):

```python
train_data = datasets.FashionMNIST(
    root="data", train=True, download=True, transform=transforms.ToTensor()
)
test_data = datasets.FashionMNIST(
    root="data", train=False, download=True, transform=transforms.ToTensor()
)
```

Then define a model. This example uses a simple convolutional neural network:

```python
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.fc = nn.Sequential(
            nn.Linear(7 * 7 * 64, 128),
            nn.ReLU(),
            nn.Linear(128, 10)
        )

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out
```

Then define the loss function and optimizer:

```python
model = CNN()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
```

Finally, train the model and evaluate its performance:

```python
train_loader = DataLoader(train_data, batch_size=100, shuffle=True)
test_loader = DataLoader(test_data, batch_size=100, shuffle=False)

for epoch in range(10):
    for i, (images, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print(f"Epoch [{epoch + 1}/{10}], Step [{i + 1}/{len(train_loader)}], Loss: {loss.item():.4f}")

with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    print(f"Test Accuracy: {accuracy:.2f}%")
```
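The loading script above contains a commented-out `torch.device` line. As a small, optional sketch (not part of the original code, and assuming the `CNN`, `criterion`, `optimizer`, and loaders defined in the previous section), training can be moved to a GPU when one is available by sending the model and each batch to the selected device:

```python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = CNN().to(device)                                    # move the model parameters to the chosen device
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

for images, labels in train_loader:                         # one epoch, sketched
    images, labels = images.to(device), labels.to(device)   # move each batch to the same device as the model
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
```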