1. 相关包导入
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# Column names for the training CSV (the file itself has no header row).
# BUG FIX: the original was missing the comma between "gender" and "age" —
# adjacent string literals concatenate, yielding a 4-element list
# ["genderage", "job", "industry", "rfm_label"] instead of 5 names.
all_names = ["gender", "age", "job", "industry", "rfm_label"]
# Subset of columns used as model features.
feature_names = ["gender", "age"]
# Script entry point.
# NOTE(review): in the document's order this guard appears *before*
# linear_model() is defined; in a runnable .py file it must be placed after
# the function definition, otherwise the call raises NameError — confirm
# intended file layout.
if __name__ == '__main__':
    linear_model()
2. 模型定义
def linear_model():
    """Train and evaluate a linear regression model on the RFM training set.

    Reads ``../lr_data/train.csv`` (tab-separated, no header row), fills
    missing values with 0, standardizes the feature columns, fits a
    ``LinearRegression`` model, and prints predictions, coefficients,
    intercept, and mean squared error on a held-out test split.
    """
    # Load the raw data; the file has no header row, so attach column names.
    data = pd.read_csv('../lr_data/train.csv', header=None, sep='\t').fillna(0)
    data.columns = all_names

    X = data.loc[:, feature_names]
    # BUG FIX: the original selected the non-existent column 'revolve_rate',
    # which raises KeyError. 'rfm_label' is the target column declared in
    # all_names — TODO confirm against the data dictionary.
    Y = data.loc[:, 'rfm_label']

    X_train, X_test, y_train, y_test = train_test_split(
        X, Y, test_size=0.3, random_state=0)

    # Standardize features. BUG FIX: the scaler must be fitted on the
    # training set only; the original called fit_transform on the test set,
    # re-fitting the scaler and leaking test-set statistics into
    # preprocessing.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(X_train)
    x_test = transfer.transform(X_test)

    estimator = LinearRegression()  # ordinary least squares
    # estimator = SGDRegressor(max_iter=1000)  # gradient-descent variant
    # estimator = Ridge(alpha=1)  # ridge-regression variant
    estimator.fit(x_train, y_train)

    # Model evaluation.
    y_predict = estimator.predict(x_test)
    print("预测值为:\n", y_predict)
    print("模型中的系数为:\n", estimator.coef_)
    print("模型中的偏置为:\n", estimator.intercept_)
    error = mean_squared_error(y_test, y_predict)
    print("误差为:\n", error)
3. R2_score
参考:r2_score 使用方法(Snowgrass,CSDN 博客)
# Built-in method: LinearRegression.score returns R^2 on the given data.
# NOTE(review): `estimator`, `x_test`, and `y_test` are locals of
# linear_model(); as written this top-level line only runs if pasted inside
# that function after fitting — confirm intended placement.
print(estimator.score(x_test, y_test))
# 自定义
def r2_score(y_true, y_predict):
    """Coefficient of determination: 1 - MSE(y_true, y_predict) / Var(y_true).

    Uses the population variance (``np.var`` with its default ``ddof=0``) of
    the true values as the baseline error, i.e. the error of always
    predicting the mean of ``y_true``.
    """
    baseline_variance = np.var(y_true)
    residual_mse = mean_squared_error(y_true, y_predict)
    return 1 - residual_mse / baseline_variance
R2通俗地理解为使用均值作为误差基准,看预测误差是否大于或者小于均值基准误差。
R2_score=1: 样本中预测值和真实值完全相等,没有任何误差,此时自变量对因变量的解释能力最强;
R2_score=0: 此时分子等于分母,样本的每项预测值都等于均值。