CS231n Assignment 2 -- Fully-Connected Neural Network

This post records Assignment 2 of the CS231n course, on fully-connected neural networks. Its main content is a complete implementation of a fully-connected neural network, with detailed comments to aid understanding.

Course page: http://cs231n.github.io/assignments2016/assignment2/


The main purpose is simply to archive a fairly complete fully-connected neural network implementation. No separate write-up is included; the comments in the code are reasonably detailed.


dataset.py

# -*- coding: utf-8 -*-

import os

import numpy as np

def unpickle(file):
    """Load one pickled CIFAR-10 batch file (Python 2 cPickle format)."""
    import cPickle
    with open(file, 'rb') as fo:
        batch = cPickle.load(fo)
    return batch

#load dataset of cifar10
def load_CIFAR10(cifar10_dir):
    #get the training data
    X_train = []
    y_train = []
    for i in range(1, 6):
        # os.path.join builds the batch path portably instead of
        # hard-coding Windows separators
        dic = unpickle(os.path.join(cifar10_dir, "data_batch_" + str(i)))
        for item in dic["data"]:
            X_train.append(item)
        for item in dic["labels"]:
            y_train.append(item)
            
    #get the test data
    X_test = []
    y_test = []
    # the original code hard-coded "\\", which is just an escaped backslash
    # in a string literal; os.path.join handles the separator portably
    dic = unpickle(os.path.join(cifar10_dir, "test_batch"))
    for item in dic["data"]:
        X_test.append(item)
    for item in dic["labels"]:
        y_test.append(item)
    
    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)
    X_test = np.asarray(X_test)
    y_test = np.array(y_test)
    return X_train, y_train, X_test, y_test

def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):  
    """ 
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare 
    it for the linear classifier. These are the same steps as we used for the SVM, 
    but condensed to a single function.  
    """  
    # Load the raw CIFAR-10 data 
    cifar10_dir = r'E:\python\cs231n\cifar-10-batches-py'   # raw string so backslashes are not escapes; change to your local path
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)  
    # Subsample the data (this loader returns flat rows, so every X has shape (N, 3072))
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]                   # (1000, 3072)
    y_val = y_train[mask]                   # (1000,)
    mask = range(num_training)
    X_train = X_train[mask]                 # (49000, 3072)
    y_train = y_train[mask]                 # (49000,)
    mask = range(num_test)
    X_test = X_test[mask]                   # (1000, 3072)
    y_test = y_test[mask]                   # (1000,)

    # preprocessing: subtract the mean image
    # cast to float first: the raw pixels are uint8, and subtracting a
    # float mean in place would fail (or silently truncate) otherwise
    X_train = X_train.astype(np.float64)
    X_val = X_val.astype(np.float64)
    X_test = X_test.astype(np.float64)
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Reshape data to rows (already flat here; the reshape is a no-op kept for safety)
    X_train = X_train.reshape(num_training, -1)      # (49000, 3072)
    X_val = X_val.reshape(num_validation, -1)        # (1000, 3072)
    X_test = X_test.reshape(num_test, -1)            # (1000, 3072)
    
    data = {}
    data['X_train'] = X_train
    data['y_train'] = y_train
    data['X_val'] = X_val
    data['y_val'] = y_val
    data['X_test'] = X_test
    data['y_test'] = y_test

    return data
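
A minimal sanity check (not in the original post) can be appended to dataset.py to confirm the splits come out with the documented shapes; it assumes the CIFAR-10 batches sit at the hard-coded cifar10_dir above:

if __name__ == '__main__':
    # quick check of the loader and the train/val/test split shapes
    data = get_CIFAR10_data()
    for name in ('X_train', 'y_train', 'X_val', 'y_val', 'X_test', 'y_test'):
        print name, data[name].shape
    # expected: X_train (49000, 3072), X_val (1000, 3072), X_test (1000, 3072)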


fc_net.py

# -*- coding: utf-8 -*-

__coauthor__ = 'Andrew'
# 2.25.2017 #

import numpy as np
import layers

class TwoLayerNet(object):   
    """    
    A two-layer fully-connected neural network with ReLU nonlinearity and    
    softmax loss that uses a modular layer design. We assume an input dimension    
    of D, a hidden dimension of H, and perform classification over C classes.    

    The architecture should be affine - relu - affine - softmax.    

    Note that this class does not implement gradient descent; instead, it    
    will interact with a separate Solver object that is responsible for running    
    optimization.    

    The learnable parameters of the model are stored in the dictionary    
    self.params that maps parameter names to numpy arrays.   
    """
    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,           
                              weight_scale=1e-3, reg=0.0):    
        """    
        Initialize a new network.   
        Inputs:    
        - input_dim: An integer giving the size of the input    
        - hidden_dim: An integer giving the size of the hidden layer    
        - num_classes: An integer giving the number of classes to classify    
        - weight_scale: Scalar giving the standard deviation for random 
                        initialization of the weights.    
        - reg: Scalar giving L2 regularization strength.    
        """    
        self.params = {}    
        self.reg = reg   
        self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)     
        self.params['b1'] = np.zeros((1, hidden_dim))    
        self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)  
        self.params['b2'] = np.zeros((1, num_classes))

    def loss(self, X, y=None):    
        """   
        Compute loss and gradient for a minibatch of data.    
        Inputs:    
        - X: Array of input data of shape (N, d_1, ..., d_k)    
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].  
        Returns:   
        If y is None, then run a test-time forward pass of the model and return:
        - scores: Array of shape (N, C) giving classification scores, where
          scores[i, c] is the classification score for X[i] and class c.
        If y is not None, then run a training-time forward and backward pass
        and return a tuple of:
        - loss: Scalar value giving the loss
        - grads: Dictionary with the same keys as self.params, mapping
          parameter names to gradients of the loss with respect to those
          parameters.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']

        # forward pass: affine - relu - affine
        # (the original post is truncated here; this reconstruction assumes
        # layers.py provides the standard assignment functions
        # affine_forward, relu_forward, affine_backward, relu_backward
        # and softmax_loss)
        a1, fc1_cache = layers.affine_forward(X, W1, b1)
        h1, relu_cache = layers.relu_forward(a1)
        scores, fc2_cache = layers.affine_forward(h1, W2, b2)

        # test-time: no labels given, just return the class scores
        if y is None:
            return scores

        # softmax data loss plus L2 regularization on the weights
        loss, dscores = layers.softmax_loss(scores, y)
        loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2))

        # backward pass: chain the gradients through each layer
        grads = {}
        dh1, grads['W2'], grads['b2'] = layers.affine_backward(dscores, fc2_cache)
        da1 = layers.relu_backward(dh1, relu_cache)
        dx, grads['W1'], grads['b1'] = layers.affine_backward(da1, fc1_cache)

        # add the regularization gradient
        grads['W1'] += self.reg * W1
        grads['W2'] += self.reg * W2

        return loss, grads
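
As a quick smoke test (not part of the original post), the network can be run on a tiny random problem; the script below uses Python 2 print statements to match the cPickle usage above, and the sizes N, D, H, C are made up for illustration:

# smoke test for TwoLayerNet on random data
import numpy as np
from fc_net import TwoLayerNet

np.random.seed(0)
N, D, H, C = 5, 12, 7, 10
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, reg=0.1)

X = np.random.randn(N, D)
y = np.random.randint(C, size=N)

scores = model.loss(X)          # test-time forward pass, shape (N, C)
loss, grads = model.loss(X, y)  # training-time loss and gradients
print 'scores shape:', scores.shape    # (5, 10)
print 'loss:', loss
for name in sorted(grads):
    print name, grads[name].shape

In the actual assignment, training is then driven by the separate Solver object mentioned in the class docstring rather than by hand-written update loops.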