[Repost] Implementing neural network training with Python (numpy): convolution, fully connected, pooling

Reference link: numpy.flipud in Python

cnn_numpy 

Implement a neural network with numpy, and train and test it on MNIST.

Currently the following operators are included:

Convolution: Conv2D(name="conv2", in_channels=6, out_channels=12, kernel_size=3, stride=1, padding=1)

Fully connected: FC(name="full1", in_channels=28*28, out_channels=512)

Pooling: MaxPooling / AvgPolling

MaxPooling('pool2',ksize=2,stride=2), # 7*7*32
AvgPolling('pool2',ksize=2,stride=2), # 7*7*32

Activation functions

Sigmoid, Relu, Tanh, Softmax

Loss functions

CE (cross-entropy loss), MSE (mean squared error loss)

Example: building a network

# build a conv + fc network

class Net(Module):

    def __init__(self):

        super(Net,self).__init__()

        

        self.layers = [

            Conv2D(name="conv1",in_channels= 1, out_channels= 6,kernel_size=3,stride=1,padding=1), # 1*28*28

            MaxPooling('pool1',ksize=2,stride=2), # 6*14*14

            Tanh(name='relu'),

 

            Conv2D(name="conv2",in_channels= 6, out_channels= 12,kernel_size=3,stride=1,padding=1),

            MaxPooling('pool2',ksize=2,stride=2), # 12*7*7

            Tanh(name='relu2'),

 

            FC(name="full1",in_channels= 12*7*7 , out_channels= 512),

            Tanh(name="sigmoid1"),

            FC(name="full2",in_channels=512,out_channels=128),

            Tanh(name="sigmoid2"),

            FC(name="full3",in_channels=128,out_channels=10),

        ]
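As a quick sanity check of the assembled network (a usage sketch, assuming numpy and the layer classes above are imported), you can push a random MNIST-shaped batch through it and verify the output shape:

if __name__ == '__main__':
    net = Net()
    x = np.random.randn(2, 1, 28, 28)  # a fake batch of two 1*28*28 images
    out = net.forward(x)
    print(out.shape)  # expected: (2, 10)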

 

Layer / Module base class definitions

class Layers():

    def __init__(self,name):

        self.name = name

    # forward pass

    def forward(self,x):

        pass

    # zero the gradients

    def zero_grad(self):

        pass

    # backward pass

    def backward(self,grad_out):

        pass

    # parameter update

    def update(self,lr=1e-3):

        pass

 

 

class Module():

    def __init__(self):

        self.layers = []  # all the Layers

 

    def forward(self,x):

        for layer in self.layers:

            x = layer.forward(x)

        return x

 

    def backward(self,grad):

        for layer in reversed(self.layers):

            layer.zero_grad()

            grad = layer.backward(grad)

    

    def step(self,lr=1e-3):

        for layer in reversed(self.layers):

            layer.update(lr)

 

Each Layer must implement forward (forward pass), backward (backpropagation), zero_grad (reset the gradients) and update (update the parameters). Module is mainly used to chain all the Layers together, giving a streamlined forward prediction and backward training flow.
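For illustration, here is a minimal sketch of a hypothetical extra layer (a learnable per-feature bias, not part of the repository) written against the Layers contract above:

import numpy as np
from module import Layers

class AddBias(Layers):
    # hypothetical example layer: y = x + b with a learnable per-feature bias b
    def __init__(self, name, n_features):
        super(AddBias, self).__init__(name)
        self.bias = np.zeros(n_features)
        self.grad_b = np.zeros(n_features)

    def forward(self, x):
        return x + self.bias

    def backward(self, grad_out):
        # dL/db: sum the incoming gradient over the batch dimension
        self.grad_b = np.sum(grad_out, axis=0)
        # dL/dx is just the incoming gradient, passed on to the previous layer
        return grad_out

    def zero_grad(self):
        self.grad_b.fill(0)

    def update(self, lr=1e-3):
        self.bias -= lr * self.grad_b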

FC layer

import numpy as np

from module import Layers

 

class FC(Layers):

    def __init__(self,name,in_channels,out_channels):

        super(FC,self).__init__(name)

        self.weights = np.random.standard_normal((in_channels,out_channels))

        self.bias = np.zeros(out_channels)

 

        self.grad_w = np.zeros((in_channels,out_channels))

        self.grad_b = np.zeros(out_channels)

 

    def forward(self, input):

        self.in_shape = input.shape 

        input = np.reshape(input,(input.shape[0],-1)) # flat

        self.input = input

        return np.dot(input,self.weights)+self.bias

 

    def backward(self,grad_out):

        N = grad_out.shape[0]

        dx = np.dot(grad_out,self.weights.T)

 

        self.grad_w = np.dot(self.input.T,grad_out)

        self.grad_b = np.sum(grad_out,axis=0)

 

        return dx.reshape(self.in_shape)

    

    def zero_grad(self):

        self.grad_w.fill(0)

        self.grad_b.fill(0)

 

    def update(self,lr=1e-3):

        self.weights -= lr*self.grad_w  

        self.bias -= lr*self.grad_b

 

FC test

Use a single FC layer to fit the function y = wx + b:

if __name__ == '__main__':

    # initialize a fixed w and b, then generate training data from them

    w = np.random.randn(100,1)

    b = np.random.randn(1)

    x_data = np.random.randn(500,100)  # data

    label = np.dot(x_data,w)+b  # label

        

    layer = FC('fc',100,1)

 

    for i in range(10000):

        index = i%(500-10)

        x = x_data[index:index+10] # batch = 10

        y = label[index:index+10]  

 

        out = layer.forward(x)

        # mse loss 

        loss = np.mean(np.sum(np.square(out-y),axis=-1))

        # gradient of the MSE loss

        dy = out-y

        layer.zero_grad()

        grad = layer.backward(dy)

        layer.update(1e-3)  # update parameters

 

        if i%1000==0:

            print(loss)

 

Conv2D

import numpy as np

from module import Layers

 

class Conv2D(Layers):

    def __init__(self,name,in_channels,out_channels, kernel_size, stride,padding,bias=True):

        super(Conv2D,self).__init__(name)

 

        self.in_channels = in_channels

        self.out_channels = out_channels

        self.ksize = kernel_size

        self.stride = stride

        self.padding = padding

        

        self.weights = np.random.standard_normal((out_channels,in_channels,kernel_size,kernel_size))

        self.bias = np.zeros(out_channels)

        

        self.grad_w = np.zeros(self.weights.shape)

        self.grad_b = np.zeros(self.bias.shape)

    '''  An alternative implementation: less efficient, using nested loops

    def _sing_conv(self,x):

        x = np.pad(x,((0,0),(0,0),(self.padding,self.padding),(self.padding,self.padding)),'constant',constant_values=0)

        b,c,h,w = x.shape

        oh = (h-self.ksize)//self.stride +1

        ow = (w-self.ksize)//self.stride +1

        out = np.zeros((b,self.out_channels,oh,ow))

        for n in range(b):

            for d in range(self.out_channels):

                for i in range(0,oh,1):

                    for j in range(0,ow,1):

                        _x = i*self.stride

                        _y = j*self.stride

                        out[n,d,i,j] = np.sum(x[n,:,_x:_x+self.ksize,_y:_y+self.ksize]*self.weights[d,:,:,:])#+self.bias[d]

        return out

    '''

    def forward(self,x):

        self.x = x

        weights = self.weights.reshape(self.out_channels,-1) # o,ckk

 

        x = np.pad(x,((0,0),(0,0),(self.padding,self.padding),(self.padding,self.padding)),'constant',constant_values=0)

        b,c,h,w = x.shape

 

        self.out = np.zeros((b,self.out_channels,(h-self.ksize)//self.stride +1,(w-self.ksize)//self.stride +1))

        

        self.col_img = self.im2col(x,self.ksize,self.stride) # bhw * ckk

        out = np.dot(weights,self.col_img.T).reshape(self.out_channels,b,-1).transpose(1,0,2)

        

        self.out = np.reshape(out,self.out.shape) + self.bias.reshape(1,-1,1,1)  # add the per-channel bias (broadcast over batch and spatial dims)

 

        return self.out

 

    def backward(self,grad_out):

        b,c,h,w = self.out.shape  # 

        

        grad_out_ = grad_out.transpose(1,0,2,3)  # (b,oc,h,w) -> (oc,b,h,w)

        grad_out_flat = np.reshape(grad_out_, [self.out_channels, -1])

        

        self.grad_w = np.dot(grad_out_flat,self.col_img).reshape(self.grad_w.shape)

        self.grad_b = np.sum(grad_out_flat,axis=1)

        tmp = self.ksize - self.padding - 1

        grad_out_pad = np.pad(grad_out,((0,0),(0,0),(tmp,tmp),(tmp,tmp)),'constant',constant_values=0)

        

        flip_weights = np.flip(self.weights, (2, 3))

        # flip_weights = np.flipud(np.fliplr(self.weights)) # rot(180)

        flip_weights = flip_weights.swapaxes(0,1) # in oc

        col_flip_weights = flip_weights.reshape([self.in_channels,-1])

        


        

        col_grad = self.im2col(grad_out_pad,self.ksize,1) #bhw,ckk

        

        # (in, oc*k*k) x (b*h*w, oc*k*k).T -> (in, b*h*w); use the 180-degree-rotated weights
        next_eta = np.dot(col_flip_weights,col_grad.T).reshape(self.in_channels,b,-1).transpose(1,0,2)

        

        next_eta = np.reshape(next_eta, self.x.shape)

       

        return next_eta

 

    def zero_grad(self):

        self.grad_w = np.zeros_like(self.grad_w)

        self.grad_b = np.zeros_like(self.grad_b)

 

    def update(self,lr=1e-3):

        self.weights -= lr*self.grad_w  

        self.bias -= lr*self.grad_b

 

    def im2col(self,x,k_size,stride):

        b,c,h,w = x.shape

        image_col = []

        for n in range(b):

            for i in range(0,h-k_size+1,stride):

                for j in range(0,w-k_size+1,stride):

                    col = x[n,:,i:i+k_size,j:j+k_size].reshape(-1)

                    image_col.append(col)

        

        return np.array(image_col)

 

# test_conv 

if __name__ == '__main__':

    x = np.random.randn(5,3,32,32)

    conv = Conv2D('conv1',3,12,4,1,1)

    y = conv.forward(x)

    print(y.shape)

    loss = y-(y+1)  # dummy gradient of all -1s, just to exercise backward

    grad = conv.backward(loss)

    print(grad.shape)

 

The implementation mainly borrows the im2col idea, which turns the convolution into an ordinary matrix multiplication (just like an FC layer). I won't explain im2col in detail here; there are plenty of articles about it if you are interested.
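As a quick numerical check of the im2col path (a sketch, assuming the Conv2D class above), the output of Conv2D.forward can be compared against a naive sliding-window loop on a tiny input; the weights are randomly initialized, so only the two computations are compared with each other:

np.random.seed(0)
x = np.random.randn(1, 1, 4, 4)
conv = Conv2D('tiny', in_channels=1, out_channels=2, kernel_size=3, stride=1, padding=0)

y = conv.forward(x)  # im2col + matrix multiply, shape (1, 2, 2, 2)

# naive reference: slide the 3x3 kernel explicitly
ref = np.zeros_like(y)
for d in range(2):
    for i in range(2):
        for j in range(2):
            ref[0, d, i, j] = np.sum(x[0, :, i:i+3, j:j+3] * conv.weights[d]) + conv.bias[d]

print(np.allclose(y, ref))  # expected: True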

Pooling 

import numpy as np

from module import Layers

 

class MaxPooling(Layers):

    def __init__(self, name, ksize=2, stride=2):

        super(MaxPooling,self).__init__(name)

        self.ksize = ksize

        self.stride = stride 

 

    def forward(self, x):

        n,c,h,w = x.shape

        out = np.zeros([n, c, h//self.stride,w//self.stride])

        self.index = np.zeros_like(x)

        for b in range(n):

            for d in range(c):

                for i in range(h//self.stride):

                    for j in range(w//self.stride):

                        _x = i*self.stride

                        _y = j*self.stride

                        out[b, d ,i , j] = np.max(

                            x[b, d ,_x:_x+self.ksize, _y:_y+self.ksize])

                        index = np.argmax(x[b, d ,_x:_x+self.ksize, _y:_y+self.ksize])

                        self.index[b,d,_x+index//self.ksize, _y+index%self.ksize] = 1

        return out

 

    def backward(self, grad_out):

        return np.repeat(np.repeat(grad_out, self.stride, axis=2), self.stride, axis=3) * self.index

 

# AvgPooling is implemented in the full code
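Since that implementation isn't reproduced here, below is a minimal sketch of what an average-pooling layer might look like under the same interface (an assumption modelled on the MaxPooling code above, not the repository's actual code; note the usage example at the top spells it AvgPolling, and ksize == stride is assumed as in MaxPooling):

class AvgPooling(Layers):
    def __init__(self, name, ksize=2, stride=2):
        super(AvgPooling, self).__init__(name)
        self.ksize = ksize
        self.stride = stride

    def forward(self, x):
        n, c, h, w = x.shape
        out = np.zeros([n, c, h//self.stride, w//self.stride])
        for i in range(h//self.stride):
            for j in range(w//self.stride):
                _x = i*self.stride
                _y = j*self.stride
                # average over each ksize*ksize window
                out[:, :, i, j] = np.mean(x[:, :, _x:_x+self.ksize, _y:_y+self.ksize], axis=(2, 3))
        return out

    def backward(self, grad_out):
        # spread each output gradient evenly over its ksize*ksize input window
        return np.repeat(np.repeat(grad_out, self.stride, axis=2),
                         self.stride, axis=3) / (self.ksize*self.ksize)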

 

Activation functions

These derivatives are straightforward, and the activation functions are a good place to start when working through the whole backpropagation process step by step.
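Concretely, the backward methods below just multiply the incoming gradient by the local derivative of each activation:

Relu: derivative is 1 where x > 0 and 0 where x < 0
Sigmoid: s(x) = 1/(1+exp(-x)), with s'(x) = s(x)*(1-s(x))
Tanh: tanh'(x) = 1 - tanh(x)^2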

from module import Layers

import numpy as np

 

class Relu(Layers):

    def __init__(self,name):

        super(Relu,self).__init__(name)

 

    def forward(self,input):

        self.input = input

        return np.maximum(input, 0)

 

    def backward(self,grad_out):

        grad_out[self.input<0]=0

        return grad_out

 

class Sigmoid(Layers):

    def __init__(self, name):

        super(Sigmoid,self).__init__(name)

    def forward(self,input):

        self.output = 1/(1+np.exp(-input))

        return self.output

    def backward(self,grad):

        grad = grad * self.output*(1-self.output)

        return grad

 

class Tanh(Layers):

    def __init__(self, name):

        super(Tanh,self).__init__(name)

    def forward(self,input):

        a = np.exp(input)

        b = np.exp(-input)

        self.output = (a-b)/(a+b)

        return self.output

    def backward(self,grad):

        grad = grad * (1-self.output*self.output)

        return grad

 

Loss 

from module import Layers

import numpy as np

 

def Softmax(input):

    vec_max = np.max(input,axis=1,keepdims=True)  

    input -= vec_max

    exp = np.exp(input) 

    softmax_pro = exp/np.sum(exp,axis=1,keepdims=True)

    return softmax_pro

 

class CrossEntropyLoss():

    def __init__(self, reduce='mean'):

        super(CrossEntropyLoss,self).__init__()

        self.reduce = reduce

 

    def __call__(self,pred,label):

        # self.softmax_p = Softmax(pred)

        self.softmax_p = pred  # pred is expected to already be softmax probabilities

        self.real = label

        loss = 0

        for i in range(label.shape[0]):

            loss += -np.log( self.softmax_p[i,label[i]] )

        if self.reduce == 'mean':

            loss /= label.shape[0]

        grad = self.grad()

        return loss,grad

        

    def grad(self):

        grad = self.softmax_p.copy()

        for i in range(self.real.shape[0]):

            grad[i,self.real[i]] -= 1

        return grad 

        

class MSELoss():

    def __init__(self, reduce='mean'):

        super(MSELoss,self).__init__()

        self.reduce = reduce

 

    def __call__(self,pred,label):

        assert pred.shape == label.shape, 'pred and label shapes must match'

        loss = np.sum(np.square((pred-label)),axis=-1)

        if self.reduce == 'mean':

            loss = np.mean(loss)

        else:

            loss = np.sum(loss)

        grad = (pred-label)

        return loss,grad

 

The trickier part is the derivative of the CE loss. Once the derivation is worked through, the gradient with respect to the logits is simply p_i - 1 for the true class (and p_i for the other classes). The reference below explains it fairly clearly; just work through it step by step.

Reference: softmax cross-entropy loss and its derivation
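To convince yourself of the p - onehot result, the analytic gradient returned by CrossEntropyLoss can be compared against a numerical finite-difference gradient of the composed softmax + CE loss (a small check using the Softmax and CrossEntropyLoss defined above; reduce='sum' keeps the analytic gradient un-scaled):

np.random.seed(0)
logits = np.random.randn(3, 5)
label = np.array([2, 0, 4])
ce = CrossEntropyLoss(reduce='sum')

# analytic gradient w.r.t. the logits: p - onehot(label)
_, grad = ce(Softmax(logits.copy()), label)

# numerical gradient by central finite differences
eps = 1e-5
num_grad = np.zeros_like(logits)
for i in range(logits.shape[0]):
    for j in range(logits.shape[1]):
        plus, minus = logits.copy(), logits.copy()
        plus[i, j] += eps
        minus[i, j] -= eps
        lp, _ = ce(Softmax(plus), label)
        lm, _ = ce(Softmax(minus), label)
        num_grad[i, j] = (lp - lm) / (2*eps)

print(np.max(np.abs(grad - num_grad)))  # should be tiny (~1e-10), confirming d loss / d logits = p - onehot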

MNIST training

Training with only fully connected layers reaches about 92% accuracy; this used the full dataset (60k training images, 10k test images).

Training with Conv + FC is very slow, so only 10k training images were used; after 5 epochs it reaches about 70% accuracy on the test set, which verifies that the code works.

 

The optimizer is plain SGD with a default initial learning rate of 1e-3; too large a learning rate makes training diverge. Adam is worth trying when there is time.
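For reference, here is a minimal sketch of what such a training loop might look like, assembled from the classes above (load_mnist is a hypothetical placeholder for the repository's actual data loading; it is assumed to return images of shape (N, 1, 28, 28) and integer labels of shape (N,)):

# minimal training-loop sketch; load_mnist() is a hypothetical placeholder, not part of the repo
train_images, train_labels = load_mnist()
net = Net()
criterion = CrossEntropyLoss(reduce='mean')
batch_size, lr = 10, 1e-3

for epoch in range(5):
    for it in range(train_images.shape[0] // batch_size):
        x = train_images[it*batch_size:(it+1)*batch_size]
        y = train_labels[it*batch_size:(it+1)*batch_size]

        probs = Softmax(net.forward(x))  # CrossEntropyLoss expects softmax probabilities
        loss, grad = criterion(probs, y)

        net.backward(grad)  # zero grads, then backprop through every layer
        net.step(lr)        # plain SGD update on every layer

        if it % 500 == 0:
            print('epoch:', epoch, 'iter:', it, 'loss:', loss)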

 

# log print

epoch: 19 iter: 57500 loss: 0.2737244704525886 acc: 0.902 n_correct: 51956

epoch: 19 iter: 58000 loss: 0.1859380691846724 acc: 0.902 n_correct: 52417

epoch: 19 iter: 58500 loss: 0.2722464711524591 acc: 0.903 n_correct: 52894

epoch: 19 iter: 59000 loss: 0.06477324861968438 acc: 0.903 n_correct: 53369

epoch: 19 iter: 59500 loss: 0.08215275528987825 acc: 0.903 n_correct: 53844

 

 

TODO 

Add optimizer algorithms (e.g. Adam)

Add a BN (batch normalization) layer

Code link: Git repository
