Machine Learning in Action with Python 3: Logistic regression, comparing the effect of gradient ascent vs. stochastic gradient ascent, with matplotlib scatter plots and decision boundary

This post implements logistic regression in Python 3 and compares the effect of two optimizers: batch gradient ascent and stochastic gradient ascent. It shows the converged result of 500 iterations of gradient ascent, then the learning process of a single pass of stochastic gradient ascent, including a plot of the weight trajectories. Finally, running stochastic gradient ascent for 20 passes with learning-rate decay yields convergence plots essentially identical to those of batch gradient ascent.
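For context, both methods climb the log-likelihood of the logistic model; in vector form the batch update is

    w := w + alpha * X^T * (y - sigmoid(X * w))

which is exactly the `weights = weights + alpha * data_matrix.transpose() * error` line in `grad_ascent` below. The stochastic variant applies the same rule one sample at a time.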

main.py

import random
import numpy as np
import matplotlib.pyplot as plt


def load_dataset():
    """
    Each line holds two feature values, X1 and X2, followed by the class label.
    """
    data_mat = []
    label_mat = []
    # line format: -0.017612	14.053064	0
    with open('testSet.txt', 'r') as f:
        for line in f.readlines():
            # split the line into fields
            line_arr = line.strip().split()
            # prepend a constant 1.0 as x0, the bias term of the linear model
            data_mat.append([1., float(line_arr[0]), float(line_arr[1])])
            # convert the label to an integer
            label_mat.append(int(line_arr[2]))
    return data_mat, label_mat


def sigmoid(inx):
    # the S-shaped logistic function
    return 1.0 / (1 + np.exp(-inx))


def grad_ascent(data_mat, class_labels):
    """
    Batch gradient ascent.
    :param data_mat: samples, one row per sample (x0 = 1 is the bias column)
    :param class_labels: class labels, 0 or 1
    :return: final weights and the weight history over all iterations
    """
    # convert to matrix format
    data_matrix = np.mat(data_mat)  # 2D: rows are samples, columns are features
    # also a matrix, transposed from a row into a column vector
    label_mat = np.mat(class_labels).transpose()
    m, n = np.shape(data_matrix)
    alpha = 0.001  # learning rate
    max_cycles = 500  # number of iterations
    weights = np.ones((n, 1))
    # record the weights at every iteration
    weights_history = np.zeros((max_cycles, n))
    for k in range(max_cycles):
        # alpha = 0.004 / (1.0 + k) + 0.001  # optional decaying learning rate
        h = sigmoid(data_matrix * weights)  # column vector of predictions
        error = (label_mat - h)  # per-sample prediction error
        # full-batch update along the gradient of the log-likelihood
        weights = weights + alpha * data_matrix.transpose() * error
        weights_history[k, :] = weights.transpose()
    return weights, weights_history


def stoc_grad_ascent(data_mat, class_labels, iter_counts=20):
    m, n = np.shape(data_mat)
    # alpha = 0.01  # before the improvement: a constant learning rate
    weights = np.ones(n)
    # number of passes over the data
    # iter_counts = 20
    # record the weights after every single-sample update
    weights_history = np.zeros((m * iter_counts, n))
    # iter_counts passes over the samples, updating on one sample at a time
    for j in range(iter_counts):
        data_index = list(range(m))  # only used by the commented-out random variant
        for i in range(m):
            # decaying learning rate, as is common in simulated-annealing schedules
            alpha = 4 / (1.0 + j + i) + 0.01  # the improvement: damps the oscillation
            # the random-sampling variant below did not seem reliable here
            # rand_index = int(random.uniform(0, len(data_index)))
            h = sigmoid(np.sum(data_mat[i] * weights))
            error = class_labels[i] - h
            weights = weights + alpha * error * data_mat[i]
            weights_history[i + j * m, :] = weights
            # data_index.pop(rand_index)
    return weights, weights_history


def plot_data(weights):
    """
    Plot the class scatter and the decision boundary.
    :param weights: learned weight vector
    :return:
    """
    # weights may be a matrix (from grad_ascent); flatten it to an ndarray
    weights = np.squeeze(np.asarray(weights))
    data_mat, label_mat = load_dataset()
    data_arr = np.array(data_mat)
    n = np.shape(data_arr)[0]
    xcord1, ycord1, xcord2, ycord2 = [], [], [], []
    for i in range(n):
        # split the samples into the two classes
        if int(label_mat[i]) == 1:
            xcord1.append(data_arr[i, 1])
            ycord1.append(data_arr[i, 2])
        else:
            xcord2.append(data_arr[i, 1])
            ycord2.append(data_arr[i, 2])
    # create the figure
    fig = plt.figure()
    # a single subplot
    ax = fig.add_subplot(111)
    # scatter plots, one per class
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = np.arange(-3.0, 3.0, 0.1)
    # decision boundary: w0 + w1*x1 + w2*x2 = 0, solved for x2
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()


def plot_weights_update(weights_history):
    """Plot how each weight evolves over the updates."""

    fig = plt.figure()
    # three rows, one column: first panel
    ax = fig.add_subplot(311)
    ax.plot(weights_history[:, 0])
    plt.ylabel('X0')
    ax = fig.add_subplot(312)
    ax.plot(weights_history[:, 1])
    plt.ylabel('X1')
    ax = fig.add_subplot(313)
    ax.plot(weights_history[:, 2])
    plt.xlabel('iteration')
    plt.ylabel('X2')
    plt.show()


if __name__ == '__main__':
    data_arr, label_mat = load_dataset()  # load the data
    weights1, weights_history1 = grad_ascent(data_arr, label_mat)  # batch gradient ascent

    weights, weights_history = stoc_grad_ascent(np.array(data_arr), label_mat)  # stochastic

    plot_weights_update(weights_history)  # stochastic
    plot_weights_update(weights_history1)  # batch
    plot_data(weights)  # stochastic
    plot_data(weights1)  # batch
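A side note on the `sigmoid` above: for strongly negative inputs, `np.exp(-inx)` overflows float64 and NumPy emits a RuntimeWarning, even though the result still saturates correctly to 0. If the warning matters, one minimal guard (a sketch, not part of the original listing) is to clip the exponent; float64 `exp` only overflows past roughly 709, so +/-500 is a safe bound:

def sigmoid_clipped(inx):
    # identical to sigmoid() except the exponent is clipped, so np.exp
    # can never overflow; the output still saturates to 0 or 1
    return 1.0 / (1 + np.exp(-np.clip(inx, -500, 500)))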

Gradient ascent

500 iterations of gradient ascent: (figure)

The converged result of gradient ascent: (figure)
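These two figures come straight from the functions above; a minimal driver to reproduce them (the same calls as in `__main__`):

data_arr, label_mat = load_dataset()
ga_weights, ga_history = grad_ascent(data_arr, label_mat)
plot_weights_update(ga_history)  # weight curves over the 500 iterations
plot_data(ga_weights)            # scatter plot plus the fitted boundary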


Stochastic gradient ascent

A single pass of stochastic gradient ascent: (figure)

The corresponding weight-learning curves for that one pass: (figure)
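A single pass corresponds to `iter_counts=1`; continuing from the snippet above:

w_1pass, h_1pass = stoc_grad_ascent(np.array(data_arr), label_mat, iter_counts=1)
plot_data(w_1pass)            # boundary after seeing each sample once
plot_weights_update(h_1pass)  # one update per sample, 100 updates in total

(If the figure was instead produced with the pre-improvement constant rate, swap the decaying `alpha` for the commented-out `alpha = 0.01`; that is a guess based on the comments in the listing.)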



Twenty passes of stochastic gradient ascent, with learning-rate decay added, give essentially the same result as batch gradient ascent: (figure)

And the corresponding convergence plot: (figure)

Perfect.
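This final run is just the default configuration of `stoc_grad_ascent` (`iter_counts=20`, with the decaying `alpha` already built in):

w_20, h_20 = stoc_grad_ascent(np.array(data_arr), label_mat, iter_counts=20)
plot_data(w_20)            # boundary nearly identical to batch gradient ascent
plot_weights_update(h_20)  # 2000 single-sample updates, settling to a plateau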


Appendix:

testSet.txt

-0.017612	14.053064	0
-1.395634	4.662541	1
-0.752157	6.538620	0
-1.322371	7.152853	0
0.423363	11.054677	0
0.406704	7.067335	1
0.667394	12.741452	0
-2.460150	6.866805	1
0.569411	9.548755	0
-0.026632	10.427743	0
0.850433	6.920334	1
1.347183	13.175500	0
1.176813	3.167020	1
-1.781871	9.097953	0
-0.566606	5.749003	1
0.931635	1.589505	1
-0.024205	6.151823	1
-0.036453	2.690988	1
-0.196949	0.444165	1
1.014459	5.754399	1
1.985298	3.230619	1
-1.693453	-0.557540	1
-0.576525	11.778922	0
-0.346811	-1.678730	1
-2.124484	2.672471	1
1.217916	9.597015	0
-0.733928	9.098687	0
-3.642001	-1.618087	1
0.315985	3.523953	1
1.416614	9.619232	0
-0.386323	3.989286	1
0.556921	8.294984	1
1.224863	11.587360	0
-1.347803	-2.406051	1
1.196604	4.951851	1
0.275221	9.543647	0
0.470575	9.332488	0
-1.889567	9.542662	0
-1.527893	12.150579	0
-1.185247	11.309318	0
-0.445678	3.297303	1
1.042222	6.105155	1
-0.618787	10.320986	0
1.152083	0.548467	1
0.828534	2.676045	1
-1.237728	10.549033	0
-0.683565	-2.166125	1
0.229456	5.921938	1
-0.959885	11.555336	0
0.492911	10.993324	0
0.184992	8.721488	0
-0.355715	10.325976	0
-0.397822	8.058397	0
0.824839	13.730343	0
1.507278	5.027866	1
0.099671	6.835839	1
-0.344008	10.717485	0
1.785928	7.718645	1
-0.918801	11.560217	0
-0.364009	4.747300	1
-0.841722	4.119083	1
0.490426	1.960539	1
-0.007194	9.075792	0
0.356107	12.447863	0
0.342578	12.281162	0
-0.810823	-1.466018	1
2.530777	6.476801	1
1.296683	11.607559	0
0.475487	12.040035	0
-0.783277	11.009725	0
0.074798	11.023650	0
-1.337472	0.468339	1
-0.102781	13.763651	0
-0.147324	2.874846	1
0.518389	9.887035	0
1.015399	7.571882	0
-1.658086	-0.027255	1
1.319944	2.171228	1
2.056216	5.019981	1
-0.851633	4.375691	1
-1.510047	6.061992	0
-1.076637	-3.181888	1
1.821096	10.283990	0
3.010150	8.401766	1
-1.099458	1.688274	1
-0.834872	-1.733869	1
-0.846637	3.849075	1
1.400102	12.628781	0
1.752842	5.468166	1
0.078557	0.059736	1
0.089392	-0.715300	1
1.825662	12.693808	0
0.197445	9.744638	0
0.126117	0.922311	1
-0.679797	1.220530	1
0.677983	2.556666	1
0.761349	10.693862	0
-2.168791	0.143632	1
1.388610	9.341997	0
0.317029	14.739025	0