# X = X.values
# y = y.values
# Switched to the Boruta feature selector
# import sklearn
# from lightgbm.sklearn import LGBMClassifier
# from sklearn.ensemble import RandomForestClassifier
# from boruta import BorutaPy
# rf = LGBMClassifier(num_leaves=60, max_depth=7, n_estimators=1200, learning_rate=0.03,
#                     bagging_fraction=0.7, bagging_freq=60, max_bin=150, min_data_in_leaf=60,
#                     feature_fraction=0.7, random_state=1)
# feat_selector = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=1, max_iter=20)
# # find all relevant features
# feat_selector.fit(X, y)
# check the ranking of the features
# feat_selector.ranking_
X = train_data.drop(['ts_code', 'date', 'rownum', 'target', 'week', 'year'], axis=1)  # keep X as a DataFrame here; the .values array used for the Boruta fit has no column names
t = pd.DataFrame({'var': X.columns})  # one row per feature name
t['importance'] = feat_selector.ranking_  # requires the Boruta fit above to have been run; rank 1 = confirmed, 2 = tentative, higher = rejected
#t = t.sort_values(by=['importance'],ascending=False)
var_ok = t[t['importance'] <= 4]['var'].values  # keep features Boruta ranked 4 or better
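# Hedged sketch (not from the original code): var_ok presumably feeds the next
# modelling step by restricting the feature matrix to the Boruta-selected
# columns. Assuming X is the DataFrame rebuilt above, that could look like:
X_selected = X[var_ok]  # hypothetical name; keep only the columns Boruta ranked <= 4
print(len(var_ok), 'features kept:', list(var_ok))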