Batch Gradient Descent and Visualization

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
"""mini-batch Gradient descent"""
random.seed(2018)
learning_rate = 0.01
def da(y, y_pre, x):
    # Partial derivative of the per-point loss with respect to a.
    return (y - y_pre) * (-x)

def db(y, y_pre):
    # Partial derivative of the per-point loss with respect to b.
    return (y - y_pre) * (-1)

def calc_loss(a, b, x, y):
    # Mean squared error (with a 1/2 factor) of the line y = a*x + b over all points.
    delta_y = y - (a * x + b)
    SSE = np.sum(delta_y ** 2) / (2 * len(x))
    return SSE
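# The per-point loss minimized in the training loop below is L = (1/2) * (y - (a*x + b))**2.
# Its partial derivatives are dL/da = -(y - y_pre) * x and dL/db = -(y - y_pre),
# which is exactly what da() and db() return; calc_loss() is the same quantity
# averaged over all points and is used only to draw the loss surface.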

def draw_hill(x, y):
    a = np.linspace(-20, 20, 100)
    b = np.linspace(-20, 20, 100)
    x = np.array(x)
    y = np.array(y)

    allSSE = np.zeros(shape=(len(a), len(b)))
    for ai in range(len(a)):
        for bi in range(len(b)):
            a0 = a[ai]
            b0 = b[bi]
            SSE = calc_loss(a=a0, b=b0, x=x, y=y)
            allSSE[ai][bi] = SSE

    a, b = np.meshgrid(a, b)
    return (a, b, allSSE)
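# draw_hill() evaluates calc_loss on a grid of (a, b) values: allSSE[ai][bi] is the loss
# at (a[ai], b[bi]), i.e. a varies along rows. np.meshgrid(a, b) (default 'xy' indexing)
# puts a along columns instead, which is why allSSE is transposed (hallSSE.T) before
# the contour plots below.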

def shuffle_data(x, y):
    # Shuffle x and y in unison so each x[i] stays paired with its y[i].
    pairs = list(zip(x, y))
    random.shuffle(pairs)
    x[:], y[:] = zip(*pairs)

def get_batch_data(x, y, batch=3):
    # Shuffle the data, then take the first `batch` points as a mini-batch.
    shuffle_data(x, y)
    x_new = x[0:batch]
    y_new = y[0:batch]
    return (x_new, y_new)
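# Example: x_new, y_new = get_batch_data(x, y, batch=3) shuffles the paired data in place
# and returns the first 3 (x, y) pairs as the mini-batch (the training loop below uses batch=4).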

#Simulated data
x = [30, 35, 37, 59, 70, 76, 88, 100]
y = [1100, 1423, 1377, 1800, 2304, 2588, 3495, 4839]

#Data Normalization
x_max = max(x)
x_min = min(x)
y_max = max(y)
y_min = min(y)

for i in range(len(x)):
    x[i] = (x[i] - x_min)/(x_max - x_min)
    y[i] = (y[i] - y_min)/(y_max - y_min)
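# Min-max normalization: each value is mapped to [0, 1] via x' = (x - x_min) / (x_max - x_min),
# which keeps the feature and target on comparable scales for the fixed learning rate.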

ha, hb, hallSSE = draw_hill(x, y)
hallSSE = hallSSE.T

#init a, b
a = 10.0
b = -20.0
fig = plt.figure(1, figsize=(14,10))

#plot picture1
ax = fig.add_subplot(221, projection='3d')
ax.set_top_view()
ax.plot_surface(ha, hb, hallSSE, rstride=2, cstride=2, cmap='rainbow')
ax.set_title('3D loss surface')

# plot picture2
plt.subplot(222)
#Fill color
plt.contourf(ha, hb, hallSSE, 15, alpha=0.5, cmap=plt.cm.hot)
#Draw contour lines
C = plt.contour(ha, hb, hallSSE, 15, colors='black')
plt.clabel(C, inline=True, fontsize=10)
plt.xlabel('a')
plt.ylabel('b')
plt.title('Contour map')
plt.ion()

all_loss = []
all_step = []
last_a = a
last_b = b
for step in range(200):
    loss = 0
    all_da = 0
    all_db = 0
    x_new, y_new = get_batch_data(x, y, batch=4)   # sample a mini-batch of 4 points
    for i in range(len(x_new)):
        y_pre = a * x_new[i] + b
        loss = loss + ((y_new[i] - y_pre) ** 2) / 2
        all_da = all_da + da(y_new[i], y_pre, x_new[i])
        all_db = all_db + db(y_new[i], y_pre)
    loss = loss / len(x_new)   # mean loss over the mini-batch

    ax.scatter(a, b, loss, color='black')

    plt.subplot(222)
    plt.scatter(a,b,s=5,color='blue')
    plt.plot([last_a, a], [last_b, b], color='aqua')

    plt.subplot(223)
    plt.title('Regression line')
    plt.plot(x, y, 'o')            # data points
    x_ = np.linspace(0, 1, 2)
    y_draw = a * x_ + b
    plt.plot(x_, y_draw)           # current fitted line y = a*x + b

    all_loss.append(loss)
    all_step.append(step)
    plt.subplot(224)
    plt.title('Loss')
    plt.plot(all_step, all_loss, color='orange')
    plt.xlabel('step')
    plt.ylabel('loss')

    last_a = a
    last_b = b
    a = a - learning_rate * all_da
    b = b - learning_rate * all_db

    if (step + 1) % 1 == 0:   # print and redraw every step; increase the modulus to update less often
        print("step: {}, loss: {}".format(step+1, loss))
        plt.show()
        plt.pause(0.01)

plt.show()
plt.pause(9999999)   # keep the final figure window open
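As a quick sanity check (not part of the original code), the mini-batch result can be compared against the closed-form least-squares fit. The sketch below assumes the normalized lists x, y and the final a, b from the loop are still in scope, and would have to run before the blocking plt.pause call above; np.polyfit(x, y, 1) returns the ordinary least-squares slope and intercept.

# Optional sanity check (assumes x, y, a, b and calc_loss from the script above).
a_ls, b_ls = np.polyfit(x, y, 1)   # closed-form least-squares slope and intercept
print("gradient descent: a = {:.4f}, b = {:.4f}".format(a, b))
print("least squares   : a = {:.4f}, b = {:.4f}".format(a_ls, b_ls))
print("full-data loss at (a, b): {:.6f}".format(calc_loss(a, b, np.array(x), np.array(y))))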

Notice:

This article was written after reading another author's blog post and reimplementing the code, so it is just a study note. If anything here infringes on the original, please let me know and I will delete it.
