Logistic Regression
Logistic regression on the data_banknote_authentication dataset from the UCI Machine Learning Repository.
Dataset: each row holds four numeric features (columns 0-3: variance, skewness, kurtosis, and entropy statistics of wavelet-transformed banknote images) and a 0/1 class label in column 4 (genuine vs. forged).
Cost:
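Written out, the cost is the standard average cross-entropy, matching compute_cost in the code below (m is the number of samples, h_theta the sigmoid hypothesis):

J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \Big[ y^{(i)} \log h_\theta(x^{(i)}) + \big(1 - y^{(i)}\big) \log\big(1 - h_\theta(x^{(i)})\big) \Big], \qquad h_\theta(x) = \frac{1}{1 + e^{-\theta^{T} x}}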
The goal is to find the value of theta that minimizes J(theta); the approach used below is standard batch gradient descent.
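Concretely, each iteration updates every parameter simultaneously with the rule below (alpha is the learning rate); this is exactly what compute_gradient and gradient_descent in the code implement:

\theta_j \leftarrow \theta_j - \frac{\alpha}{m} \sum_{i=1}^{m} \big( h_\theta(x^{(i)}) - y^{(i)} \big)\, x_j^{(i)}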
Implementation from scratch (no ML libraries):
import pandas as pd

# Load the dataset (adjust the path to wherever the file is stored)
data = pd.read_csv('D:/Desktop/data_banknote_authentication.txt', header=None)
print(data.head())

X = data[[0, 1, 2, 3]]  # columns 0-3: the four features
y = data[[4]]           # column 4: the 0/1 class label
import math

# Pre-set upper bounds for buffer sizes; only MAX_FEATURE_DIMENSION is used below
MAX_FEATURE_DIMENSION = 1024
MAX_SAMPLE_NUMBER = 1024
MAX_ITERATE_NUMBER = 1024
## Hypothesis function
def sigmoid(z):
    # math.exp overflows for arguments beyond ~709, so clamp z first
    z = max(min(z, 700.0), -700.0)
    return 1.0 / (1.0 + math.exp(-z))
def hypothesis(x, theta, feature_number):
    # x[0] is expected to be the constant 1, so theta[0] acts as the bias term
    h = 0.0
    for i in range(feature_number + 1):
        h += x[i] * theta[i]
    return sigmoid(h)
## Partial derivative of J(theta) with respect to theta[feature_pos]
def compute_gradient(x, y, theta, feature_number, feature_pos, sample_number):
    total = 0.0  # renamed from `sum` to avoid shadowing the built-in
    for i in range(sample_number):
        h = hypothesis(x[i], theta, feature_number)
        total += (h - y[i]) * x[i][feature_pos]
    return total / sample_number
## Cross-entropy cost J(theta)
def compute_cost(x, y, theta, feature_number, sample_number):
    total = 0.0
    for i in range(sample_number):
        h = hypothesis(x[i], theta, feature_number)
        # Clamp h away from exactly 0 or 1 so math.log never gets an invalid argument
        h = min(max(h, 1e-12), 1.0 - 1e-12)
        total += -y[i] * math.log(h) - (1 - y[i]) * math.log(1 - h)
    return total / sample_number
## Gradient descent
def gradient_descent(x, y, theta, feature_number, sample_number, alpha, count):
    for _ in range(count):
        # Compute all new parameters into tmp first so the update is simultaneous
        tmp = [0.0] * MAX_FEATURE_DIMENSION
        for j in range(feature_number + 1):
            tmp[j] = theta[j] - alpha * compute_gradient(x, y, theta, feature_number, j, sample_number)
        for j in range(feature_number + 1):
            theta[j] = tmp[j]
    return theta
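A minimal sketch of how the pieces above might be wired together end to end. The bias column of ones, the hyperparameters alpha = 0.01 and count = 1000, and the accuracy check are illustrative assumptions, not part of the original code:

# Convert the DataFrame to plain lists and prepend the bias feature x[0] = 1 (assumption)
samples = [[1.0] + row for row in X.values.tolist()]
labels = y[4].tolist()

feature_number = 4                       # four real features; theta[0] is the bias
sample_number = len(samples)
theta = [0.0] * (feature_number + 1)     # start from all-zero parameters

theta = gradient_descent(samples, labels, theta, feature_number, sample_number,
                         alpha=0.01, count=1000)  # assumed hyperparameters
print('final cost:', compute_cost(samples, labels, theta, feature_number, sample_number))

# Rough sanity check: training-set accuracy with a 0.5 decision threshold
correct = sum((hypothesis(s, theta, feature_number) >= 0.5) == (t == 1)
              for s, t in zip(samples, labels))
print('train accuracy:', correct / sample_number)

Since the features are used unscaled here, a fairly small learning rate is the safer choice; scaling the features first would generally let a larger alpha converge faster.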