Python3.0导入CIFAR-10数据集
前言
在搞完本科毕业设计之后终于有时间可以重新开始年前的关于深度学习的一些知识的学习了,年前其实一直因为python的版本问题导致无法很好地解决CIFAR-10数据集的导入问题(windows10)。在这段时间的学习后,终于可以攻克这一难题了,现将经验总结如下。
本次笔记采用参考书是《深度学习实战》,杨云,杜飞著-北京:清华大学出版社,2018版本
实现的目的:成功在python3.0以上版本中实现对CIFAR-10数据集的导入工作。
load_CIFAR10函数
将load_CIFAR10(root)函数封装在名为data_utils.py的模块库中,置于python的默认路径之下:
首先需要安装好 imageio、numpy 等包,此项操作在 pycharm 中较好实现
将以下代码封装至data_utils.py文件中作为模块文件
import pickle
import numpy as np
import os
from imageio import imread
def load_CIFAR_batch(filename):
    """Load a single pickled CIFAR-10 batch file.

    Parameters
    ----------
    filename : str
        Path to one batch file (e.g. ``data_batch_1`` or ``test_batch``).

    Returns
    -------
    X : np.ndarray, float, shape (N, 32, 32, 3)
        Image data, converted from the stored (N, 3072) channel-first
        layout to channel-last HWC order.
    Y : np.ndarray, shape (N,)
        Integer class labels.
    """
    with open(filename, 'rb') as f:
        # The official batches were pickled under Python 2; a latin-1 style
        # encoding lets Python 3's pickle decode the byte strings.
        datadict = pickle.load(f, encoding='iso-8859-1')
    X = datadict['data']
    Y = datadict['labels']
    # BUG FIX: the row count was hard-coded to 10000; use -1 so batches of
    # any size (e.g. truncated or custom batches) load correctly.
    X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
    Y = np.array(Y)
    return X, Y
def load_CIFAR10(ROOT):
    """Load the full CIFAR-10 dataset from directory *ROOT*.

    Reads the five training batches ``data_batch_1`` .. ``data_batch_5``
    and the test batch, and returns them as
    ``(Xtr, Ytr, Xte, Yte)`` with the training batches concatenated.
    """
    train_batches = [
        load_CIFAR_batch(os.path.join(ROOT, 'data_batch_%d' % (b, )))
        for b in range(1, 6)
    ]
    Xtr = np.concatenate([images for images, _ in train_batches])
    Ytr = np.concatenate([labels for _, labels in train_batches])
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """Load CIFAR-10, split it, zero-center it, and move channels first.

    Parameters
    ----------
    num_training, num_validation, num_test : int
        Sizes of the three splits; the validation split is carved off the
        tail of the training data.

    Returns
    -------
    dict with keys 'X_train', 'y_train', 'X_val', 'y_val',
    'X_test', 'y_test'; image arrays are in (N, C, H, W) layout.
    """
    cifar10_dir = 'datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Split: validation comes from the tail of the training portion.
    val_idx = range(num_training, num_training + num_validation)
    X_val, y_val = X_train[val_idx], y_train[val_idx]
    train_idx = range(num_training)
    X_train, y_train = X_train[train_idx], y_train[train_idx]
    test_idx = range(num_test)
    X_test, y_test = X_test[test_idx], y_test[test_idx]

    # Zero-center every split with the training-set mean only.
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # HWC -> CHW, copied so each split owns contiguous memory.
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
def load_tiny_imagenet(path, dtype=np.float32):
    """Load a TinyImageNet-layout dataset rooted at *path*.

    Expects the standard layout: ``wnids.txt``, ``words.txt``,
    ``train/<wnid>/images`` with per-synset ``*_boxes.txt`` files,
    ``val/images`` plus ``val_annotations.txt``, and ``test/images``
    (with an optional ``test_annotations.txt``).

    Parameters
    ----------
    path : str
        Dataset root directory.
    dtype : np.dtype
        dtype of the returned image arrays (default ``np.float32``).

    Returns
    -------
    (class_names, X_train, y_train, X_val, y_val, X_test, y_test)
        Image arrays have shape (N, 3, 64, 64). ``y_test`` is ``None``
        when no test annotation file exists.
    """
    # Map each WordNet id to an integer label, in wnids.txt order.
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}

    # Human-readable names per wnid.
    # BUG FIX: dict.iteritems() was removed in Python 3 -- use items().
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]

    # Training set: one block of images per synset.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print('loading training data for synset %d / %d' % (i + 1, len(wnids)))
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)
        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                # Grayscale image: give it a singleton channel axis; the
                # assignment below then broadcasts it over all 3 channels.
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)

    # Validation set: labels come from val_annotations.txt (file \t wnid ...).
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
    num_val = len(img_files)
    y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
    X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'val', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_val[i] = img.transpose(2, 0, 1)

    # Test set: images only; labels are optional.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)

    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                # NOTE(review): assumes column 0 is the filename and column 1
                # the wnid; extra columns are ignored -- confirm file format.
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]
        y_test = np.array(y_test)

    return class_names, X_train, y_train, X_val, y_val, X_test, y_test
def load_models(models_dir):
    """Unpickle every loadable model file found in *models_dir*.

    Each file is expected to contain a pickled dict with a ``'model'``
    entry; files that fail to unpickle are silently skipped.

    Returns a dict mapping filename -> that file's ``'model'`` object.
    """
    loaded = {}
    for fname in os.listdir(models_dir):
        full_path = os.path.join(models_dir, fname)
        with open(full_path, 'rb') as fh:
            try:
                loaded[fname] = pickle.load(fh)['model']
            except pickle.UnpicklingError:
                # Not a valid pickle -- skip and keep scanning.
                continue
    return loaded
随后再将其封装到文件夹 utils 中,存放于 python 的路径之下:
调用cifar-10的数据集的数据
下载链接:
下载解压完的数据保存至python的路径如下:
'D:\Anaconda3\envs\PythonExamples\Lib\cifar-10-batches-py'
以下是显示数据信息的脚本:
import numpy as np
import random
from utils.data_utils import load_CIFAR10
from classifiers.chapter2 import *
import matplotlib.pyplot as plt

# Default figure size for the sample grid.
plt.rcParams['figure.figsize'] = (10.0, 8.0)

# BUG FIX: use a raw string for the Windows path -- in a normal string
# sequences like '\A' are invalid escapes (DeprecationWarning) and a path
# containing '\n' or '\t' would silently be corrupted.
cifar10_dir = r'D:\Anaconda3\envs\PythonExamples\Lib\cifar-10-batches-py'
X_train, Y_train, X_test, Y_test = load_CIFAR10(cifar10_dir)

# --- Visualize a few random samples from each class ---
# BUG FIX: CIFAR-10 has 10 classes; 'truck' was missing from this list,
# so class 9 was never plotted and the grid was one column short.
classes = ['plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
sample_per_classes = 7
for y, cls in enumerate(classes):
    # All training indices with label y; draw a few without replacement.
    idxs = np.flatnonzero(Y_train == y)
    idxs = np.random.choice(idxs, sample_per_classes, replace=False)
    for i, idx in enumerate(idxs):
        # Grid is filled one column per class, one row per sample.
        plt_idx = i * num_classes + y + 1
        plt.subplot(sample_per_classes, num_classes, plt_idx)
        plt.imshow(X_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()