Classification Models: Variable Selection

import numpy as np  
import scipy as sp  
import pandas as pd
import matplotlib.pyplot as plt

Split Train and Test

from sklearn.model_selection import train_test_split  # sklearn.cross_validation is deprecated

# The last column of each DataFrame is the target; everything before it is a feature.
# Note: the second split overwrites the first, so run the one matching the dataset being analysed.
x_train, x_test, y_train, y_test = train_test_split(customer.iloc[:, 0:customer.columns.size-1], customer.iloc[:, customer.columns.size-1], test_size=0.2)
x_train, x_test, y_train, y_test = train_test_split(order.iloc[:, 0:order.columns.size-1], order.iloc[:, order.columns.size-1], test_size=0.2)
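For a classification target it can help to stratify the split so both sets keep the same class balance. A minimal sketch, assuming the last column of customer is the label; stratify and the fixed random_state are my own additions, not from the original:

x = customer.iloc[:, :-1]
y = customer.iloc[:, -1]

# stratify=y keeps the class ratio identical in train and test;
# random_state makes the split reproducible
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, stratify=y, random_state=42)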

Pearson Correlation for Order

from scipy.stats import pearsonr

# Pearson correlation of each feature with the target (last column)
prr = []
for i in range(order.columns.size - 1):
    prr.append(pearsonr(order.iloc[:, i], order.iloc[:, -1]))

# Use only the feature columns for the label list, so rows line up with prr
result = pd.concat([pd.DataFrame(order.columns[:-1].tolist()), pd.DataFrame(prr)], axis=1)
result.columns = ['Features', 'Pearson', 'Pvalue']
result
result.to_csv('result.csv', index=True, header=True)
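The correlation table can feed directly into a shortlist of candidate variables. A minimal sketch, where the 0.05 significance cutoff and the 0.1 absolute-correlation floor are my own illustrative thresholds, not from the original:

# Keep features that are statistically significant and not negligibly correlated
selected = result[(result['Pvalue'] < 0.05) & (result['Pearson'].abs() > 0.1)]
selected_features = selected['Features'].tolist()
print(selected_features)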

Pearson Correlation for Customer

from scipy.stats import pearsonr

prr = []
for i in range(customer.columns.size - 1):
    prr.append(pearsonr(customer.iloc[:, i], customer.iloc[:, -1]))

result = pd.concat([pd.DataFrame(customer.columns[:-1].tolist()), pd.DataFrame(prr)], axis=1)
result.columns = ['Features', 'Pearson', 'Pvalue']
result
result.to_csv('result.csv', index=True, header=True)

Random Forest

from sklearn.ensemble import RandomForestRegressor
# For a continuous target
clf = RandomForestRegressor()
clf.fit(x_train, y_train)

from sklearn.ensemble import RandomForestClassifier
# For a categorical target; n_jobs sets parallel workers, use n_estimators to control the number of trees
clf = RandomForestClassifier(n_jobs=100)
clf.fit(x_train, y_train)
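Before trusting the forest's importances it is worth checking the fit on the held-out split. A minimal sketch using sklearn's accuracy_score and the 20% test set created above:

from sklearn.metrics import accuracy_score

y_pred = clf.predict(x_test)
# Simple accuracy on the hold-out set
print('test accuracy:', accuracy_score(y_test, y_pred))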

MIC

from minepy import MINE

m = MINE()
mic = []
for i in range(customer.columns.size - 1):
    # compute_score fills the MINE object in place; mic() then returns the score
    m.compute_score(customer.iloc[:, i].values, customer.iloc[:, -1].values)
    mic.append(m.mic())

result = pd.concat([pd.DataFrame(customer.columns[:-1].tolist()), pd.DataFrame(mic)], axis=1)
result.columns = ['Features', 'MIC']
result.to_csv('result.csv', index=True, header=True)

Feature Correlation

corr = customer.corr()
corr.to_csv('result.csv', index = True, header = True)

# Correlation of every column in cus_call with the target column 'tar'
tar_corr = lambda x: x.corr(cus_call['tar'])
cus_call.apply(tar_corr)
cus_call.corrwith(cus_call.tar)
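The full correlation matrix is easier to scan as a heatmap. A minimal sketch with matplotlib (already imported above), assuming corr is the matrix computed from customer:

fig, ax = plt.subplots(figsize=(10, 8))
im = ax.imshow(corr.values, cmap='coolwarm', vmin=-1, vmax=1)  # correlations range from -1 to 1
ax.set_xticks(range(len(corr.columns)))
ax.set_xticklabels(corr.columns, rotation=90)
ax.set_yticks(range(len(corr.columns)))
ax.set_yticklabels(corr.columns)
fig.colorbar(im)
plt.tight_layout()
plt.show()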

Feature Importance

The importance scores reflect how much influence each feature has: the larger the value, the bigger the role that feature plays in the classification.

# Sort by the importance value (second element of each pair), largest first
importances = pd.DataFrame(sorted(zip(x_train.columns, map(lambda x: round(x, 4), clf.feature_importances_)), key=lambda t: t[1], reverse=True))
importances.columns = ['Features', 'Importance']
importances.to_csv('result.csv', index=True, header=True)
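To turn the ranking into an actual variable selection, one option is to keep only the highest-ranked features and refit. A minimal sketch, where the top-10 cutoff is my own illustrative choice, not from the original analysis:

top_k = 10  # illustrative cutoff
top_features = importances['Features'].head(top_k).tolist()

# Refit the classifier on the reduced feature set and compare hold-out performance
clf_top = RandomForestClassifier(n_jobs=100)
clf_top.fit(x_train[top_features], y_train)
print('hold-out score with top features:', clf_top.score(x_test[top_features], y_test))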