Complete code for handwritten digit recognition with a CrossEntropyCost + L1 loss

The loss function is the cross-entropy cost plus an L1 regularization term.
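
For reference, the two quantities the code below implements are the per-example cross-entropy cost with an L1 penalty (computed in Cross_entropy) and the corresponding weight update (applied in update_mini_batch). In the notation of Nielsen's Neural Networks and Deep Learning, whose network2.py this code closely follows:

C = -\sum_j \left[ y_j \ln a_j + (1 - y_j) \ln(1 - a_j) \right] + \frac{\lambda}{n} \sum_w |w|

w \to w - \frac{\eta \lambda}{n}\,\mathrm{sgn}(w) - \frac{\eta}{m} \sum_x \frac{\partial C_x}{\partial w}

Here n is the size of the training set and m the size of a mini-batch. Unlike L2 regularization, which rescales each weight by a constant factor, the sgn(w) term shrinks every weight by a fixed amount per step, driving small weights all the way to zero and yielding sparse weight matrices.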

import random
import numpy as np


class Network4(object):
    def __init__(self, sizes, cost):
        # sizes lists the neuron count per layer, e.g. [784, 30, 10].
        self.layernumber = len(sizes)
        self.sizes = sizes
        self.default_weight()
        self.cost = cost


    def default_weight(self):
        # Biases ~ N(0, 1); weights ~ N(0, 1/n_in). The 1/sqrt(x) scaling keeps
        # the weighted inputs small, so sigmoid neurons start out unsaturated.
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def Cross_entropy(self, a, y, lamda, n):
        # Per-example cross-entropy plus the L1 penalty (lamda / n) * sum |w|;
        # np.nan_to_num guards against 0 * log(0) producing NaN.
        return np.sum(np.nan_to_num(-y * np.log(a) - (1 - y) * np.log(1 - a))) \
               + (lamda / n) * sum(np.sum(np.abs(w)) for w in self.weights)

    def evaluate(self, test_data):
        # Count test examples whose most-activated output neuron matches the label.
        test_result = [(np.argmax(self.feedforward(x)), y)
                       for x, y in test_data]
        return sum(int(x == y) for (x, y) in test_result)

    def cost_derivate(self, out_activation, y):
        # For cross-entropy with sigmoid outputs, the output error is simply a - y.
        return out_activation - y

    def feedforward(self, a):
        for w, b in zip(self.weights, self.biases):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def backprop(self, x, y):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Forward pass: store every weighted input z and activation for backprop.
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # Backward pass: output-layer error, then propagate through hidden layers.
        delta = self.cost_derivate(activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for j in range(2, self.layernumber):
            z = zs[-j]
            ps = sigmoid_prime(z)
            delta = np.dot(self.weights[-j+1].transpose(), delta) * ps
            nabla_b[-j] = delta
            nabla_w[-j] = np.dot(delta, activations[-j-1].transpose())
        return nabla_b, nabla_w

    def update_mini_batch(self, mini_batch, eta, lamda, n):
        # Accumulate the batch gradient, then take one L1-regularized descent step.
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # L1 shrinks each weight toward zero by a constant eta * lamda / n per step.
        self.weights = [w - eta * (lamda / n) * sgn(w) - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def sgd(self, training_data, epochs, mini_batch_size, eta, lamda, test_data=None):
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for i in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta, lamda, n)
            if test_data:
                print('Epoch {0}: {1} / {2}'.format(i, self.evaluate(test_data), n_test))
            else:
                print('Epoch {0} complete'.format(i))


def sigmoid(z):
    return 1.0/(1.0 + np.exp(-z))


def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))


def sgn(w):
    # Elementwise sign of a weight matrix, as required by the L1 update rule.
    return np.sign(w)
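
A note on why cost_derivate returns a - y with no sigmoid_prime factor: for the cross-entropy cost with a = σ(z), the σ′(z) term cancels in the output-layer error, which is exactly what avoids the learning slowdown of the quadratic cost when output neurons saturate:

\delta^L = \frac{\partial C}{\partial a}\,\sigma'(z) = \frac{a - y}{a(1 - a)} \cdot a(1 - a) = a - y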
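
A minimal usage sketch, assuming the mnist_loader module from Nielsen's repository (https://github.com/mnielsen/neural-networks-and-deep-learning) is on the path; the hyperparameter values are illustrative, not tuned:

import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)  # sgd() calls len() and shuffle(), so materialize
test_data = list(test_data)

net = Network4([784, 30, 10], cost=None)  # the stored cost object is unused by sgd
net.sgd(training_data, epochs=30, mini_batch_size=10, eta=0.5, lamda=5.0,
        test_data=test_data)

With these settings the network trains on the 50,000 MNIST training images and prints its accuracy on the 10,000 test images after each epoch.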