Parsing the MNIST Dataset (IDX File Format) with Python (Reposted)

 
import numpy as np
import struct
import matplotlib.pyplot as plt

# Training set image file
train_images_idx3_ubyte_file = r'E:\datasets\MNIST_data\train\train-images.idx3-ubyte'
# Training set label file
train_labels_idx1_ubyte_file = r'E:\datasets\MNIST_data\train\train-labels.idx1-ubyte'

# Test set image file
test_images_idx3_ubyte_file = r'E:\datasets\MNIST_data\test\t10k-images.idx3-ubyte'
# Test set label file
test_labels_idx1_ubyte_file = r'E:\datasets\MNIST_data\test\t10k-labels.idx1-ubyte'


def decode_idx3_ubyte(idx3_ubyte_file):
    """
    Generic parser for idx3 (image) files.
    :param idx3_ubyte_file: path to the idx3 file
    :return: np.array of shape (num_images, num_rows, num_cols)
    """
    # Read the raw binary data
    with open(idx3_ubyte_file, 'rb') as f:
        bin_data = f.read()

    # Parse the header: magic number, number of images, rows per image, columns per image
    offset = 0
    fmt_header = '>iiii'  # four big-endian 32-bit integers
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('magic number: %d, number of images: %d, image size: %d*%d' % (magic_number, num_images, num_rows, num_cols))

    # Parse the pixel data
    image_size = num_rows * num_cols
    offset += struct.calcsize(fmt_header)
    fmt_image = '>' + str(image_size) + 'B'  # one image = image_size unsigned bytes
    images = np.empty((num_images, num_rows, num_cols))
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            print('parsed %d images' % (i + 1))
        images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
        offset += struct.calcsize(fmt_image)
    return images


def decode_idx1_ubyte(idx1_ubyte_file):
    """
    Generic parser for idx1 (label) files.
    :param idx1_ubyte_file: path to the idx1 file
    :return: np.array of shape (num_labels,)
    """
    # Read the raw binary data
    with open(idx1_ubyte_file, 'rb') as f:
        bin_data = f.read()

    # Parse the header: magic number and number of labels
    offset = 0
    fmt_header = '>ii'  # two big-endian 32-bit integers
    magic_number, num_labels = struct.unpack_from(fmt_header, bin_data, offset)
    print('magic number: %d, number of labels: %d' % (magic_number, num_labels))

    # Parse the label data
    offset += struct.calcsize(fmt_header)
    fmt_label = '>B'  # one label = one unsigned byte
    labels = np.empty(num_labels)
    for i in range(num_labels):
        if (i + 1) % 10000 == 0:
            print('parsed %d labels' % (i + 1))
        labels[i] = struct.unpack_from(fmt_label, bin_data, offset)[0]
        offset += struct.calcsize(fmt_label)
    return labels


def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
    """
    TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000803(2051) magic number
    0004     32 bit integer  60000            number of images
    0008     32 bit integer  28               number of rows
    0012     32 bit integer  28               number of columns
    0016     unsigned byte   ??               pixel
    0017     unsigned byte   ??               pixel
    ........
    xxxx     unsigned byte   ??               pixel
    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    :param idx_ubyte_file: path to the idx file
    :return: np.array of shape n*row*col, where n is the number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)
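
As a quick sanity check on the header layout quoted in the docstring above, the snippet below reads only the 16-byte IDX3 header and verifies the magic number 2051 (0x00000803) before any pixel data is touched. This is a minimal sketch; `check_idx3_header` is a helper name introduced here, not part of the original script.

```python
import struct

def check_idx3_header(path):
    """Read only the 16-byte IDX3 header and sanity-check the magic number."""
    with open(path, 'rb') as f:
        header = f.read(16)  # magic, count, rows, cols: four big-endian 32-bit ints
    magic, num_images, num_rows, num_cols = struct.unpack('>IIII', header)
    if magic != 2051:
        raise ValueError('not an IDX3 image file (magic number %d)' % magic)
    return num_images, num_rows, num_cols

# Example: check_idx3_header(train_images_idx3_ubyte_file)
```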


def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
    """
    TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000801(2049) magic number (MSB first)
    0004     32 bit integer  60000            number of items
    0008     unsigned byte   ??               label
    0009     unsigned byte   ??               label
    ........
    xxxx     unsigned byte   ??               label
    The labels values are 0 to 9.

    :param idx_ubyte_file: path to the idx file
    :return: np.array of shape (n,), where n is the number of images
    """
    return decode_idx1_ubyte(idx_ubyte_file)


def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
    """
    TEST SET IMAGE FILE (t10k-images-idx3-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000803(2051) magic number
    0004     32 bit integer  10000            number of images
    0008     32 bit integer  28               number of rows
    0012     32 bit integer  28               number of columns
    0016     unsigned byte   ??               pixel
    0017     unsigned byte   ??               pixel
    ........
    xxxx     unsigned byte   ??               pixel
    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    :param idx_ubyte_file: path to the idx file
    :return: np.array of shape n*row*col, where n is the number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)


def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
    """
    TEST SET LABEL FILE (t10k-labels-idx1-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000801(2049) magic number (MSB first)
    0004     32 bit integer  10000            number of items
    0008     unsigned byte   ??               label
    0009     unsigned byte   ??               label
    ........
    xxxx     unsigned byte   ??               label
    The labels values are 0 to 9.

    :param idx_ubyte_file: path to the idx file
    :return: np.array of shape (n,), where n is the number of images
    """
    return decode_idx1_ubyte(idx_ubyte_file)




def run():
    train_images = load_train_images()
    train_labels = load_train_labels()
    # test_images = load_test_images()
    # test_labels = load_test_labels()

    # Display the first few samples and their labels to verify that parsing is correct
    for i in range(2):
        print(train_labels[i])
        plt.imshow(train_images[i], cmap='gray')
        plt.show()
    print('done')

if __name__ == '__main__':
    run()
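
For reference, decoding each image with `struct.unpack_from` inside a Python loop is easy to follow but slow on 60,000 images. A rough alternative sketch (a variant added here, not part of the original post) reads the same files with `np.frombuffer`, relying only on the header layout documented in the docstrings above:

```python
import struct
import numpy as np

def decode_idx3_ubyte_fast(idx3_ubyte_file):
    """Vectorized counterpart of decode_idx3_ubyte: one frombuffer call instead of a loop."""
    with open(idx3_ubyte_file, 'rb') as f:
        bin_data = f.read()
    _, num_images, num_rows, num_cols = struct.unpack_from('>iiii', bin_data, 0)
    # Everything after the 16-byte header is one unsigned byte per pixel, row-major.
    pixels = np.frombuffer(bin_data, dtype=np.uint8, offset=16)
    return pixels.reshape(num_images, num_rows, num_cols)

def decode_idx1_ubyte_fast(idx1_ubyte_file):
    """Vectorized counterpart of decode_idx1_ubyte."""
    with open(idx1_ubyte_file, 'rb') as f:
        bin_data = f.read()
    # Everything after the 8-byte header is one unsigned byte per label.
    return np.frombuffer(bin_data, dtype=np.uint8, offset=8)
```

Note that `np.frombuffer` over an immutable `bytes` object returns a read-only uint8 view; call `.copy()` or `.astype(np.float32)` on the result if you need a writable array.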

Sure, here is a simple Python implementation of handwritten digit recognition using a BPNN (back-propagation neural network). It uses the MNIST dataset but no deep learning framework; everything is written by hand. The code is as follows:

```python
import numpy as np
import struct

def load_mnist_data(data_path):
    with open(data_path + 'train-images.idx3-ubyte', 'rb') as f:
        train_images = f.read()
    with open(data_path + 'train-labels.idx1-ubyte', 'rb') as f:
        train_labels = f.read()
    with open(data_path + 't10k-images.idx3-ubyte', 'rb') as f:
        test_images = f.read()
    with open(data_path + 't10k-labels.idx1-ubyte', 'rb') as f:
        test_labels = f.read()
    # Parse the datasets
    train_images = parse_images(train_images)
    train_labels = parse_labels(train_labels)
    test_images = parse_images(test_images)
    test_labels = parse_labels(test_labels)
    return train_images, train_labels, test_images, test_labels

def parse_images(data):
    magic, num, rows, cols = struct.unpack_from('>IIII', data)
    offset = struct.calcsize('>IIII')
    images = np.empty((num, rows, cols))
    for i in range(num):
        image = struct.unpack_from('>784B', data, offset)
        offset += struct.calcsize('>784B')
        images[i] = np.array(image).reshape(28, 28)
    return images

def parse_labels(data):
    magic, num = struct.unpack_from('>II', data)
    offset = struct.calcsize('>II')
    labels = np.empty(num)
    for i in range(num):
        label = struct.unpack_from('>B', data, offset)[0]
        offset += struct.calcsize('>B')
        labels[i] = label
    return labels

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1.0 - x)

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.weights1 = np.random.randn(input_size, hidden_size)
        self.weights2 = np.random.randn(hidden_size, output_size)

    def forward(self, x):
        self.z2 = np.dot(x, self.weights1)
        self.a2 = sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.weights2)
        y_hat = sigmoid(self.z3)
        return y_hat

    def backward(self, x, y, y_hat, learning_rate):
        delta3 = (y_hat - y) * sigmoid_derivative(y_hat)
        d_weights2 = np.dot(self.a2.T, delta3)
        delta2 = np.dot(delta3, self.weights2.T) * sigmoid_derivative(self.a2)
        d_weights1 = np.dot(x.T, delta2)
        self.weights1 -= learning_rate * d_weights1
        self.weights2 -= learning_rate * d_weights2

    def train(self, x, y, learning_rate):
        y_hat = self.forward(x)
        self.backward(x, y, y_hat, learning_rate)

    def predict(self, x):
        y_hat = self.forward(x)
        return np.argmax(y_hat, axis=1)

if __name__ == '__main__':
    train_images, train_labels, test_images, test_labels = load_mnist_data('data/mnist/')
    train_images = train_images.reshape(train_images.shape[0], -1)
    test_images = test_images.reshape(test_images.shape[0], -1)
    input_size = train_images.shape[1]
    hidden_size = 100
    output_size = 10
    nn = NeuralNetwork(input_size, hidden_size, output_size)
    num_epochs = 100
    batch_size = 100
    learning_rate = 0.1
    num_batches = train_images.shape[0] // batch_size
    for epoch in range(num_epochs):
        for i in range(num_batches):
            batch_start = i * batch_size
            batch_end = batch_start + batch_size
            x = train_images[batch_start:batch_end]
            # One-hot encode the labels for this batch
            y = np.zeros((batch_size, output_size))
            y[np.arange(batch_size), train_labels[batch_start:batch_end].astype(int)] = 1
            nn.train(x, y, learning_rate)
        train_acc = np.mean(nn.predict(train_images) == train_labels)
        test_acc = np.mean(nn.predict(test_images) == test_labels)
        print('Epoch: %d, Train accuracy: %f, Test accuracy: %f' % (epoch, train_acc, test_acc))
```

The idea of this code is as follows: first load the MNIST dataset, then define a neural network with a single hidden layer and train it from randomly initialized weights. During training, one batch of data is read at a time and the weights are updated with the back-propagation algorithm. Finally, the accuracies on the training and test sets are printed.
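
One caveat with the sketch above: it feeds raw 0-255 pixel values into sigmoid units, which tends to saturate the activations and can stall learning. A common remedy, added here as an assumption rather than something the original answer does, is to scale the inputs to [0, 1] before training:

```python
# Hypothetical preprocessing step, to be placed right after the reshape calls
# and before the training loop in the sketch above: scaling pixel intensities
# from [0, 255] to [0, 1] keeps the sigmoid units out of their saturated tails.
train_images = train_images.astype(np.float32) / 255.0
test_images = test_images.astype(np.float32) / 255.0
```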
