XGBoost in Practice

#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# Author: Jia ShiLin

import pandas as pd
import xgboost as xgb
from sklearn import preprocessing
import numpy as np

# metrics / model-selection / model imports
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier

path_value = 'data/xlnet_emb_pca_768.npy'
path_label = 'data/signal.npy'
x = np.load(path_value)
y = np.load(path_label)
# Standardize features to zero mean / unit variance
scaler = StandardScaler().fit(x)
x = scaler.transform(x)


print(x.shape, y.shape)  # quick sanity check on the loaded arrays
# Train/test split (15% held out)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.15)
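# (Optional sketch) a stratified split keeps the class proportions in both
# halves, and random_state pins the split for reproducibility:
# X_train, X_test, y_train, y_test = train_test_split(
#     x, y, test_size=0.15, stratify=y, random_state=27)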

# Baseline with default parameters:
# eval_set = [(X_test, y_test)]
# model = XGBClassifier()
# model.fit(X_train, y_train)
# # make predictions for test data
# y_pre = model.predict(X_test)
# predictions = [round(value) for value in y_pre]
# # evaluate predictions
# accuracy = accuracy_score(y_test, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))

print(X_train.shape, X_test.shape)
# Native-API alternative: xgb.train takes a parameter dict plus DMatrix
# inputs (note that num_boost_round is an argument of train, not a params key):
# params = {'learning_rate': 0.1,
#           'max_depth': 5,
#           'objective': 'multi:softmax',
#           'random_state': 27,
#           'num_class': 4,
#           }
# dtrain = xgb.DMatrix(X_train, label=y_train)
# model = xgb.train(params, dtrain, num_boost_round=20)
# y_pred = model.predict(xgb.DMatrix(X_test))



# For reference, XGBClassifier's full signature (older xgboost versions):
# XGBClassifier(max_depth=3, learning_rate=0.1, n_estimators=100, silent=True, objective='binary:logistic',
#               booster='gbtree', n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0, subsample=1,
#               colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
#               base_score=0.5, random_state=0, seed=None, missing=None, **kwargs)


clf = XGBClassifier(
    # Earlier trial settings:
    # n_estimators=10,
    # learning_rate=0.03,
    # max_depth=6,

    n_estimators=300,           # number of boosted trees to fit
    learning_rate=0.01,         # boosting learning rate (xgb's "eta")
    max_depth=30,               # maximum tree depth for base learners
    min_child_weight=1,         # minimum sum of instance weights needed in a child
    subsample=0.9,              # fraction of samples used per tree (row subsampling)
    colsample_bytree=0.6,       # fraction of features sampled per tree (typical: 0.5-1)
    objective='multi:softmax',  # multi-class classification
    num_class=3,
    # nthread=50,               # deprecated alias of n_jobs
    n_jobs=50,
    max_delta_step=10,
    reg_lambda=1,               # L2 regularization
    reg_alpha=0,                # L1 regularization
    seed=27)

print ("training...")
clf.fit(X_train, y_train,  verbose=True)
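# (Sketch) to make verbose output useful, pass a validation set so each
# boosting round reports an eval metric (merror by default for multi:softmax):
# clf.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=True)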

clf.save_model('tree300.model')

print('training is ok')
fit_pred = clf.predict(X_test)
print(fit_pred)
count = 0
for i in range(len(fit_pred)):
    if fit_pred[i] == y_test[i]:
        count += 1
print("accuracy:", count / len(y_test))

'''
# Reload the saved model with the native Booster API and re-evaluate.
# With objective='multi:softmax' the Booster predicts class labels directly,
# so no argmax is needed (argmax applies to 'multi:softprob' outputs).
tar = xgb.Booster(model_file='tree300.model')
dtest = xgb.DMatrix(X_test)
fit_pred1 = tar.predict(dtest)
count = 0
for i in range(len(fit_pred1)):
    if fit_pred1[i] == y_test[i]:
        count += 1
print("accuracy:", count / len(y_test))
'''
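# (Sketch) sklearn-API alternative to the Booster reload above; load_model on
# the wrapper is available in recent xgboost versions, and clf2 is just an
# illustrative name:
# clf2 = XGBClassifier()
# clf2.load_model('tree300.model')
# print("reload accuracy:", accuracy_score(y_test, clf2.predict(X_test)))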

# Feature-importance export (sketch; the inputs here are unnamed numpy
# columns, so generic feature names stand in for dataframe columns):
#
# import matplotlib.pyplot as plt
#
# inde = ['f%d' % i for i in range(x.shape[1])]
# ww = clf.feature_importances_
# print(ww)
# feat_imp = pd.Series(ww, index=inde).sort_values(ascending=False)
# feat_imp.to_excel('feature_importance.xlsx')
#
# feat_imp.plot(kind='bar', title='Feature Importances')
# plt.ylabel('Feature Importance Score')
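# (Sketch) xgboost's built-in importance plot is a one-line alternative;
# with numpy input the features are labeled f0, f1, ...:
# from xgboost import plot_importance
# import matplotlib.pyplot as plt
# plot_importance(clf, max_num_features=20)
# plt.show()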

 
