Chapter 4: Neural Network Learning

gradient_1d.py

# coding: utf-8
import numpy as np
import matplotlib.pylab as plt

def numerical_diff(f, x):
    h = 1e-4 #0.0001
    return (f(x+h) - f(x-h))/(2*h)

def function_1(x):
    return 0.01*x**2 + 0.1*x

def tangent_line(f, x):
    d  = numerical_diff(f, x)
    print(d)
    y = f(x) - d*x
    return lambda t: d*t + y

if __name__ == "__main__":
    x = np.arange(0.0, 20.0, 0.1)
    y = function_1(x)
    plt.xlabel('x')
    plt.ylabel('f(x)')
    
    tf = tangent_line(function_1, 5)
    y2 = tf(x)

    plt.plot(x, y)
    plt.plot(x, y2)
    plt.show()
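Since function_1 is f(x) = 0.01x**2 + 0.1x, its analytic derivative is 0.02x + 0.1, so numerical_diff should return roughly 0.2 at x = 5 and 0.3 at x = 10. A minimal check, assuming gradient_1d.py above sits in the working directory:

# Compare the numerical derivative with the analytic one f'(x) = 0.02x + 0.1.
from gradient_1d import numerical_diff, function_1

for x in (5.0, 10.0):
    print(numerical_diff(function_1, x), 0.02 * x + 0.1)  # each pair should nearly match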

gradient_2d.py

# coding: utf-8
# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D

def _numerical_gradient_no_batch(f, x):
    h = 1e-4 # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x) # f(x+h)
        
        x[idx] = tmp_val - h
        fxh2 = f(x) # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2*h)
        
        x[idx] = tmp_val

    return grad

def numerical_gradient(f, X):
    if X.ndim == 1:
        return _numerical_gradient_no_batch(f, X)
    else:
        grad = np.zeros_like(X)
        
        for idx, x in enumerate(X):
            grad[idx] = _numerical_gradient_no_batch(f, x)
        
        return grad

def function_2(x):
    if x.ndim == 1:
        return np.sum(x**2)
    else:
        return np.sum(x**2, axis=1)

def tangent_line(f, x):
    d = numerical_gradient(f, x)
    print(d)
    y = f(x) - d*x
    return lambda t: d*t + y

if __name__ == '__main__':
    x0 = np.arange(-2, 2.5, 0.25)
    x1 = np.arange(-2, 2.5, 0.25)
    X, Y = np.meshgrid(x0, x1)

    X = X.flatten()
    Y = Y.flatten()

    grad = numerical_gradient(function_2, np.array([X, Y]))

    plt.figure()
    plt.quiver(X, Y, -grad[0], -grad[1], angles='xy', color='#666666')  # ,headwidth=10,scale=40,color='#444444'
    plt.xlim([-2, 2])
    plt.ylim([-2, 2])
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.grid()
    plt.legend()
    plt.draw()
    plt.show()
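Since function_2 is f(x0, x1) = x0**2 + x1**2, its analytic gradient is (2*x0, 2*x1), which gives an easy point-wise check of numerical_gradient. A small sketch, assuming gradient_2d.py above is importable:

# Point-wise gradient check against the analytic gradient (2*x0, 2*x1).
import numpy as np
from gradient_2d import numerical_gradient, function_2

print(numerical_gradient(function_2, np.array([3.0, 4.0])))  # approx. [6. 8.]
print(numerical_gradient(function_2, np.array([0.0, 2.0])))  # approx. [0. 4.]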

gradient_method.py

# coding: utf-8
import numpy as np
import matplotlib.pylab as plt
from gradient_2d import numerical_gradient

def gradient_descent(f, init_x, lr=0.01, step_num=100):
    x = init_x
    x_history = []

    for i in range(step_num):
        x_history.append( x.copy() )
        
        grad = numerical_gradient(f, x)
        x -= lr * grad
    
    return x, np.array(x_history)

def function_2(x):
    return x[0]**2 + x[1]**2

if __name__ == '__main__':
    init_x = np.array([-3.0, 4.0])
    
    lr = 0.1
    step_num = 20
    x, x_history = gradient_descent(function_2, init_x, lr=lr, step_num=step_num)
    
    plt.plot([-5, 5], [0, 0], '--b')
    plt.plot([0, 0], [-5, 5], '--b')
    plt.plot(x_history[:, 0], x_history[:,1], 'o')

    plt.xlim(-3.5, 3.5)
    plt.ylim(-4.5, 4.5)
    plt.xlabel('X0')
    plt.ylabel('X1')
    plt.show()
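gradient_descent is sensitive to the learning rate: too large and the updates diverge, too small and they barely move. A quick sketch of both failure modes, reusing the functions above:

# Learning-rate extremes on f(x0, x1) = x0**2 + x1**2.
import numpy as np
from gradient_method import gradient_descent, function_2

x, _ = gradient_descent(function_2, np.array([-3.0, 4.0]), lr=10.0, step_num=100)
print(x)   # diverges to huge values
x, _ = gradient_descent(function_2, np.array([-3.0, 4.0]), lr=1e-10, step_num=100)
print(x)   # barely moves from the starting point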

gradient_update_process_3d.py

# coding: utf-8

import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

ratio = 1  # controls how flattened the function surface is

def gradient_descent(f, init_x, lr=0.01, step_num=100):
    x = init_x
    x_collection = []
    y_collection = []
    z_collection = []
    
    print(x)
    
    x_collection.append(x[0])
    y_collection.append(x[1])
    z = ratio * x[0]**2 + x[1]**2
    z_collection.append(z)

    for i in range(step_num):
        grad = numerical_gradient(f, x)
        x -= lr * grad
        x_collection.append(x[0])
        y_collection.append(x[1])
        z = ratio * x[0]**2 + x[1]**2
        z_collection.append(z)
    
    return [x_collection, y_collection, z_collection]

def function_2(x):
    results = []
    for item in x:
        result = ratio * item[0]**2+item[1]**2
        results.append(result)
    return results

def function_3(x):
    return ratio * x[0]**2 + x[1]**2

def _numerical_gradient_no_batch(f, x):
    h = 1e-4 # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        # f(x+h)
        x[idx] = tmp_val + h
        fxh1 = f(x)

        x[idx] = tmp_val - h
        fxh2 = f(x)
    
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val

    return grad

def numerical_gradient(f, X):
    if X.ndim == 1:
        return _numerical_gradient_no_batch(f, X)
    else:
        grad = np.zeros_like(X)
        
        for idx, x in enumerate(X):
            grad[idx] = _numerical_gradient_no_batch(f, x)

        return grad

if __name__ == '__main__':
    fig = plt.figure()
    ax1 = plt.axes(projection='3d')

    xx = np.arange(-440,440,0.5)
    yy = np.arange(-140,140,0.5)
    X, Y = np.meshgrid(xx, yy)
    Z = ratio*X**2+Y**2

    xy = gradient_descent(function_3,np.array([-400.0,-130.0]),lr=0.05,step_num=500)
    
    ax1.plot_surface(X, Y, Z, alpha=0.3, cmap='winter')         # plot the function surface
    ax1.contour(X, Y, Z, zdir='z', offset=-20, cmap='rainbow')  # contour lines projected onto the x-y plane
    ax1.scatter3D(xy[0], xy[1], xy[2], color='red', s=35)       # 3D scatter of the descent path
    ax1.scatter(xy[0], xy[1])                                   # its projection onto the x-y plane
    plt.show()
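The global ratio stretches the bowl along x0; with ratio = 1 the surface is rotationally symmetric and the descent path heads straight for the minimum. A sketch of the anisotropic case (the value 0.01 is illustrative, not from the original script):

# Flattening the bowl along x0 makes x1 converge long before x0 does.
import numpy as np
import gradient_update_process_3d as g

g.ratio = 0.01   # illustrative value; the script above uses ratio = 1
xs, ys, zs = g.gradient_descent(g.function_3, np.array([-400.0, -130.0]),
                                lr=0.05, step_num=500)
print(xs[-1], ys[-1])   # x1 is near 0 while x0 is still far from the minimum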

gradient_simplenet.py

# coding: utf-8
import sys, os
sys.path.append(os.pardir) # setting to import files from the parent directory
import numpy as np 
from common.functions import softmax, cross_entropy_error
from common.gradient import numerical_gradient

class simpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)
    
    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)
        
        return loss

if __name__ == '__main__':
    x = np.array([0.6, 0.9])
    t = np.array([0, 0, 1])

    net = simpleNet()

    f = lambda w: net.loss(x, t)
    dW = numerical_gradient(f, net.W)
    
    print(dW)
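It can help to inspect the pieces before taking the gradient: the raw scores, the predicted class, and the loss against the true label. A sketch, assuming the same x and t as above:

# Inspect simpleNet step by step before taking the gradient.
import numpy as np
from gradient_simplenet import simpleNet

net = simpleNet()
x = np.array([0.6, 0.9])
t = np.array([0, 0, 1])
p = net.predict(x)
print(p)                 # raw scores for the three classes
print(np.argmax(p))      # index of the predicted class
print(net.loss(x, t))    # cross-entropy loss against the true label t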

two_layer_net.py

# coding: utf-8
import sys, os
sys.path.append(os.pardir) # setting to import files from the parent directory
import numpy as np
from common.functions import *
from common.gradient import numerical_gradient

class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        
        return y

    def loss(self, x, t):
        y = self.predict(x)
        return cross_entropy_error(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        
        return grads

    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}
            
        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        
        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
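Before training, a quick shape check with dummy data confirms the wiring of TwoLayerNet. A minimal sketch (the random inputs are placeholders, not real MNIST data):

# Shape sanity check with dummy data.
import numpy as np
from two_layer_net import TwoLayerNet

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x = np.random.rand(100, 784)   # 100 dummy inputs the size of an MNIST image
t = np.random.rand(100, 10)    # 100 dummy one-hot-shaped labels
print(net.predict(x).shape)    # (100, 10)
grads = net.gradient(x, t)
print(grads['W1'].shape, grads['b1'].shape)   # (784, 50) (50,)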

train_neuralnet.py

# coding: utf-8
import sys, os
sys.path.append(os.pardir) # setting to import files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    #grad = network.numerical_gradient(x_batch, t_batch)
    grad = network.gradient(x_batch, t_batch)
    
    for key in ('W1','b1','W2','b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        test_acc_list.append(test_acc)
        train_acc_list.append(train_acc)
        print('train acc, test acc | ' + str(train_acc) + ', ' + str(test_acc))

markers = {'train':'o','test':'s'}

x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train_acc')
plt.plot(x, test_acc_list, label='test_acc', linestyle='--')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
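The script also collects train_loss_list but never plots it. A short follow-up sketch shows the per-iteration loss curve, assuming the lists from the run above are still in scope:

# Plot the training loss collected during the run above.
x = np.arange(len(train_loss_list))
plt.plot(x, train_loss_list)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.show()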

What we learned in this chapter

  1. The datasets used in machine learning are divided into training data and test data.
  2. The neural network learns on the training data, and the test data is used to evaluate the generalization ability of the learned model.
  3. Neural network learning uses a loss function as its metric: the weight parameters are updated so as to reduce the value of the loss function.
  4. Computing a derivative from the difference of function values over a tiny given interval is called numerical differentiation.
  5. Using numerical differentiation, we can compute the gradients of the weight parameters.
  6. Numerical differentiation is time-consuming but simple to implement; the somewhat more involved error backpropagation method, implemented in the next chapter, computes gradients much more efficiently (a gradient check comparing the two is sketched below).
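Point 6 can be verified directly: since TwoLayerNet implements both numerical_gradient and the analytic gradient, comparing them on a tiny batch checks the fast version for correctness. A minimal gradient-check sketch, assuming the MNIST loader and two_layer_net.py from above:

# Gradient check: the numerical and analytic gradients should nearly agree.
import sys, os
sys.path.append(os.pardir)  # so dataset.mnist is importable
import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

(x_train, t_train), _ = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x_batch, t_batch = x_train[:3], t_train[:3]   # a tiny batch keeps the slow version fast
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
for key in grad_numerical:
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ':' + str(diff))   # each difference should be very small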