import os
import pickle

import matplotlib.pyplot as plt
import numpy as np
def loadfile(data_dir=r'F:\PycharmProjects\cs231n\two_layer_neuralnet\cifar-10-batches-py'):
    """Load the pickled CIFAR-10 batches and split off a validation set.

    Reads ``data_batch_1`` .. ``data_batch_5`` as training data, carves out
    a random 10% of the training rows (without replacement) as the
    validation set, and reads ``test_batch`` as the test set.  Data arrays
    are converted to float; label arrays stay integer.

    Parameters
    ----------
    data_dir : str
        Directory containing the CIFAR-10 "python version" batch files.
        Defaults to the original hard-coded location.

    Returns
    -------
    tuple
        ``(train_data, train_label, val_data, val_label, test_data,
        test_label)`` as numpy arrays.
    """
    train_data = []
    train_label = []
    for i in range(1, 6):
        # 'rb' + encoding='bytes' is required: the CIFAR-10 batches are
        # Python-2 pickles, so all dict keys come back as bytes.
        with open(os.path.join(data_dir, 'data_batch_' + str(i)), 'rb') as f:
            batch = pickle.load(f, encoding='bytes')
        train_data.extend(batch[b'data'])
        train_label.extend(batch[b'labels'])
    train_data = np.array(train_data)
    train_label = np.array(train_label)

    # Sample 10% of the training rows as validation.  replace=False keeps
    # the indices unique, so the validation set has no duplicate rows and
    # np.delete removes exactly that many rows from the training set.
    subtraintoval = np.random.choice(
        train_data.shape[0], int(0.1 * train_data.shape[0]), replace=False)
    print(len(subtraintoval))
    val_data = train_data[subtraintoval].astype("float")
    val_label = train_label[subtraintoval]
    train_data = np.delete(train_data, subtraintoval, 0).astype("float")
    train_label = np.delete(train_label, subtraintoval, 0)

    with open(os.path.join(data_dir, 'test_batch'), 'rb') as f:
        test_batch = pickle.load(f, encoding='bytes')
    test_data = np.array(test_batch[b'data']).astype("float")
    test_label = np.array(test_batch[b'labels'])

    print("train_data shape:" + str(train_data.shape))
    print("train_label shape:" + str(train_label.shape))
    print("val_data shape:" + str(val_data.shape))
    print("val_label shape:" + str(val_label.shape))
    print("test_data shape:" + str(test_data.shape))
    print("test_label shape:" + str(test_label.shape))
    return train_data, train_label, val_data, val_label, test_data, test_label
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecure should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: A