Machine Learning Foundations - Homework 3 - Code

Gradient descent and Newton's method: gradient_and_newton.py
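For reference, the objective and the derivatives implemented below (read directly off the code) are

$$E(u, v) = e^u + e^{2v} + e^{uv} + u^2 - 2uv + 2v^2 - 3u - 2v$$

$$\nabla E = \begin{pmatrix} e^u + v e^{uv} + 2u - 2v - 3 \\ 2e^{2v} + u e^{uv} - 2u + 4v - 2 \end{pmatrix}, \qquad \nabla^2 E = \begin{pmatrix} e^u + v^2 e^{uv} + 2 & (1 + uv)e^{uv} - 2 \\ (1 + uv)e^{uv} - 2 & 4e^{2v} + u^2 e^{uv} + 4 \end{pmatrix}$$

Gradient descent updates with $-\eta \nabla E$; Newton's method updates with $-(\nabla^2 E)^{-1} \nabla E$.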

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np

"""
作业三中使用梯度下降和牛顿法进行迭代
"""


def update(u, v, eta):
    """One gradient-descent step on E(u, v) with learning rate eta."""
    u_tem = u - eta * (np.exp(u) + v * np.exp(u * v) + 2 * u - 2 * v - 3)
    v_tem = v - eta * (2 * np.exp(2 * v) + u * np.exp(u * v) - 2 * u + 4 * v - 2)
    return u_tem, v_tem


def iter_update(u, v, times):
    """Run `times` gradient-descent steps (eta = 0.01) and return E at the final point."""
    uo, vo = u, v
    for _ in range(times):
        uo, vo = update(uo, vo, 0.01)
    return np.exp(uo) + np.exp(2 * vo) + np.exp(uo * vo) + uo ** 2 - 2 * uo * vo + 2 * vo ** 2 - 3 * uo - 2 * vo


def update_newton(u, v):
    """One Newton step: (u, v) <- (u, v) - H^-1 * gradient."""
    gradient = np.array([np.exp(u) + v * np.exp(u * v) + 2 * u - 2 * v - 3,
                         2 * np.exp(2 * v) + u * np.exp(u * v) - 2 * u + 4 * v - 2])
    # Hessian of E at (u, v).
    hessian = np.array([[np.exp(u) + (v ** 2) * np.exp(u * v) + 2,
                         (1 + u * v) * np.exp(u * v) - 2],
                        [(1 + u * v) * np.exp(u * v) - 2,
                         4 * np.exp(2 * v) + (u ** 2) * np.exp(u * v) + 4]])
    return np.array([u, v]) - np.dot(np.linalg.pinv(hessian), gradient)


def iter_update_newton(u, v, times):
    """Run `times` Newton steps and return E at the final point."""
    uo, vo = u, v
    for _ in range(times):
        uo, vo = update_newton(uo, vo)
    return np.exp(uo) + np.exp(2 * vo) + np.exp(uo * vo) + uo ** 2 - 2 * uo * vo + 2 * vo ** 2 - 3 * uo - 2 * vo


# E(u, v) after five iterations of each method, starting from (0, 0).
print(iter_update(0, 0, 5))
print(iter_update_newton(0, 0, 5))
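As a quick sanity check (my addition, not part of the original file), the hand-derived gradient used in update() can be compared against a central finite difference of E:

def error_fn(u, v):
    # E(u, v) as returned by iter_update above.
    return (np.exp(u) + np.exp(2 * v) + np.exp(u * v)
            + u ** 2 - 2 * u * v + 2 * v ** 2 - 3 * u - 2 * v)

def numeric_grad(f, u, v, h=1e-6):
    # Central finite differences in u and v.
    du = (f(u + h, v) - f(u - h, v)) / (2 * h)
    dv = (f(u, v + h) - f(u, v - h)) / (2 * h)
    return du, dv

print(numeric_grad(error_fn, 0, 0))  # expect roughly (-2, 0), matching the analytic gradient at (0, 0)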

Linear regression code:
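The experiment samples x uniformly from [-1, 1] x [-1, 1], labels it with the target f(x1, x2) = sign(x1^2 + x2^2 - 0.6) (treating sign(0) as -1), and then flips each label independently with probability 0.1 as noise; common.py below implements this data generation.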

common.py

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np


def data_generator(size):
    # Sample `size` points uniformly from [-1, 1] x [-1, 1].
    x_arr = np.random.uniform(-1, 1, (size, 2))
    y_arr = target_function(x_arr)
    # Prepend the constant feature x0 = 1.
    x_arr = np.concatenate((np.ones((size, 1)), x_arr), axis=1)
    # Flip each label independently with probability 0.1 (label noise).
    y_arr = np.where(np.random.uniform(0, 1, size) < 0.1, -y_arr, y_arr)
    return x_arr, y_arr


def sign_zero_as_neg(x):
    """
    A variant of np.sign: inputs equal to 0 return -1 instead of 0,
    i.e. points exactly on the boundary are treated as negative examples.
    :param x:
    :return:
    """
    result = np.sign(x)
    result[result == 0] = -1
    return result


def target_function(x):
    # f(x1, x2) = sign(x1^2 + x2^2 - 0.6), with sign(0) treated as -1.
    x_tem = (x * x).sum(axis=1) - 0.6
    return sign_zero_as_neg(x_tem)

linear_regression_al.py
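This script solves linear regression in closed form. As implemented in e_in_counter below, the learned weights are the pseudo-inverse solution

$$w_{lin} = (X^T X)^{+} X^T y,$$

and transform() maps each point through the nonlinear feature transform $\Phi(1, x_1, x_2) = (1, x_1, x_2, x_1 x_2, x_1^2, x_2^2)$.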

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np
from linear_regression import common


def e_in_counter(x_arr, y_arr):
    # Closed-form solution: w_lin = (X^T X)^+ X^T y.
    w_lin = np.dot(np.dot(np.linalg.pinv(np.dot(x_arr.T, x_arr)), x_arr.T), y_arr)
    y_in = common.sign_zero_as_neg(np.dot(x_arr, w_lin))
    errs = np.where(y_in == y_arr, 0, 1)
    return errs.sum() / errs.size, w_lin


def e_out_counter(x_arr, y_arr, w_lin):
    # 0/1 error of sign(X w_lin) against the true labels.
    y_in = common.sign_zero_as_neg(np.dot(x_arr, w_lin))
    errs = np.where(y_in == y_arr, 0, 1)
    return errs.sum() / errs.size


def transform(x_arr):
    # Nonlinear feature transform: (1, x1, x2) -> (1, x1, x2, x1*x2, x1^2, x2^2).
    x1_tem = x_arr[:, 1]
    x2_tem = x_arr[:, 2]
    return np.column_stack((x_arr[:, 0], x1_tem, x2_tem, x1_tem * x2_tem, x1_tem ** 2, x2_tem ** 2))


if __name__ == '__main__':
    avg = 0
    w_avg = 0
    avg_transform = 0
    w_transform = 0
    # 1000 independent experiments, each on a fresh sample of 1000 points.
    for i in range(1000):
        xo, yo = common.data_generator(1000)
        e_in, w_in = e_in_counter(xo, yo)
        # Running mean of the in-sample error and of the learned weights.
        avg = avg + (1.0 / (i + 1)) * (e_in - avg)
        w_avg = w_avg + (1.0 / (i + 1)) * (w_in - w_avg)

        # The same experiment in the transformed feature space.
        x_trans = transform(xo)
        e_tran, w_trans = e_in_counter(x_trans, yo)
        avg_transform = avg_transform + (1.0 / (i + 1)) * (e_tran - avg_transform)
        w_transform = w_transform + (1.0 / (i + 1)) * (w_trans - w_transform)

    print("avg:", avg, "w_avg:", w_avg)
    print("avg_trans:", avg_transform, "w_trans", w_transform)

    # Estimate e_out for the averaged transformed weights on a fresh sample.
    xo, yo = common.data_generator(1000)
    x_trans = transform(xo)
    e_out = e_out_counter(x_trans, yo, w_transform)
    print("e_out:", e_out)

Logistic regression code: logistic_regression_al.py
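Both trainers implement the logistic-regression gradient that appears in the code below:

$$\nabla E_{in}(w) = \frac{1}{N} \sum_{n=1}^{N} \theta(-y_n w^T x_n)(-y_n x_n), \qquad \theta(s) = \frac{1}{1 + e^{-s}}$$

The batch version averages this over all N examples per update; the stochastic version uses a single example per update.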

# -*- coding:utf-8 -*-
# Author: Evan Mi
import numpy as np


def load_data(file_name):
    """Read space-separated features with the label in the last column; prepend x0 = 1."""
    x = []
    y = []
    with open(file_name, 'r') as f:
        for line in f:
            temp = line.rstrip("\n").strip(' ').split(" ")
            temp.insert(0, '1')
            x.append([float(val) for val in temp[:-1]])
            y.append(int(temp[-1]))

    return np.array(x), np.array(y)


def gradient_descent_logistic_regression(x, y, eta, w, times):
    """Batch gradient descent on the logistic-regression error, `times` iterations."""
    local_w = w
    for _ in range(times):
        # grad = (1/N) * sum_n theta(-y_n * w.x_n) * (-y_n * x_n)
        tem_w = np.dot((1.0 / (1 + np.exp(-((-y) * np.dot(x, local_w))))), np.array([-y]).T * x) / np.size(y)
        local_w = local_w - eta * tem_w
    return local_w


def stochastic_gradient_descent_logistic_regression(x, y, eta, w, times):
    """Stochastic gradient descent, visiting the examples in cyclic order."""
    local_w = w
    index = 0
    for _ in range(times):
        x_tem = x[index, :]
        y_tem = y[index]
        # Single-example gradient: theta(-y_n * w.x_n) * (-y_n * x_n)
        tem_w = (1.0 / (1 + np.exp(-((-y_tem) * np.dot(local_w, x_tem))))) * (-y_tem) * x_tem
        local_w = local_w - eta * tem_w
        index = (index + 1) % np.size(y)
    return local_w


def e_out_counter(x, y, w):
    # Predict +1 when theta(w.x) > 0.5, i.e. when w.x > 0, and return the 0/1 error rate.
    vec_result = np.where(np.dot(x, w) > 0, 1, -1)
    errs = np.where(vec_result == y, 0, 1)
    return errs.sum() / np.size(errs)


if __name__ == '__main__':
    x_train, y_train = load_data('data/train.dat')
    x_val, y_val = load_data('data/test.dat')
    # Batch gradient descent with eta = 0.001.
    w_one = gradient_descent_logistic_regression(x_train, y_train, 0.001, np.zeros(np.size(x_train, 1)), 2000)
    e_out_one = e_out_counter(x_val, y_val, w_one)
    print("e_out_one:", e_out_one)

    # Batch gradient descent with the larger step eta = 0.01.
    w_two = gradient_descent_logistic_regression(x_train, y_train, 0.01, np.zeros(np.size(x_train, 1)), 2000)
    e_out_two = e_out_counter(x_val, y_val, w_two)
    print("e_out_two:", e_out_two)

    # Stochastic gradient descent with eta = 0.001.
    w_s = stochastic_gradient_descent_logistic_regression(x_train, y_train, 0.001, np.zeros(np.size(x_train, 1)), 2000)
    e_out_s = e_out_counter(x_val, y_val, w_s)
    print("e_out_s:", e_out_s)

For the full project code and the data it uses, see: Gradient Descent and Newton Iteration, Linear Regression, Logistic Regression.
