Kaggle Digit Recognizer (ML)

# 1. Import packages
import pandas as pd
import numpy as np
import time

# 2. Suppress warnings
import warnings
warnings.filterwarnings("ignore")

from sklearn.neighbors import KNeighborsClassifier



# 3. Read the data
# C:\Users\hjz\AI\project\02_lianxi\01_DigitRecognizer\0_data\train.csv
dataset = pd.read_csv(r"C:\Users\hjz\AI\project\02_lianxi\01_DigitRecognizer\0_data\train.csv")
print("1. Training set size:", dataset.shape)  # (42000, 785)
x_train = dataset.values[0:, 1:]
y_train = dataset.values[0:, 0]  # column 0 is the label y
# Read the test set
x_test = pd.read_csv(r"C:\Users\hjz\AI\project\02_lianxi\01_DigitRecognizer\0_data\test.csv").values  # no label column
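# Note: since test.csv is unlabeled, scores only come from Kaggle submissions. A
# commented-out sketch (my addition, not part of the original run) of holding out
# 10% of the training data for a local accuracy estimate:
# from sklearn.model_selection import train_test_split
# x_tr, x_val, y_tr, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=0)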


# 4. PCA dimensionality reduction
# The raw features are 784-dimensional (785 columns including the label), which is
# expensive to compute with, so we reduce them with PCA while keeping enough
# components to retain 95% of the original variance.
from sklearn.decomposition import PCA
start = time.time()
pca_model = PCA()
pca_model.fit(x_train)
information_list = pca_model.explained_variance_ratio_
score = 0
num = 0
for i in information_list:
    score += i
    num += 1
    if score >= 0.95:
        print("2.降维num:",num)#154
        break
# Re-fit PCA with the chosen number of components and transform both sets
pca_model = PCA(num)
pca_model.fit(x_train)
x_train = pca_model.transform(x_train)
print("3. Training set size after PCA:", x_train.shape)  # (42000, 154)
x_test = pca_model.transform(x_test)
print("4. Test set size after PCA:", x_test.shape)  # (28000, 154)
end = time.time()
print("5.PCA耗时%.2f秒"%(end-start))

# 5. Normalization
# Min-max scale the features. Note: the test set is scaled with the training
# set's min/max so both sets share one transformation (the original code scaled
# each set with its own statistics, which is inconsistent).
x_min, x_max = x_train.min(), x_train.max()
x_train = (x_train - x_min) / (x_max - x_min)
x_test = (x_test - x_min) / (x_max - x_min)
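# Aside: a per-feature alternative (sketch, my addition) is MinMaxScaler fitted
# on the training set only and applied to both sets:
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler().fit(x_train)
# x_train = scaler.transform(x_train)
# x_test = scaler.transform(x_test)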

# 6. KNN
print("Starting KNN training...")
knn_clf = KNeighborsClassifier(n_neighbors=4, algorithm='kd_tree', weights='distance')
start = time.time()
knn_clf.fit(x_train, y_train)
result = knn_clf.predict(x_test)
result = np.c_[range(1, len(result) + 1), result.astype(int)]
df_result = pd.DataFrame(result, columns=['ImageId', 'Label'])  # Kaggle expects 'ImageId', not 'ImageID'
df_result.to_csv('../results_knn.csv', index=False)
end = time.time()
print("6. KNN took %.2f s" % (end - start))

# 7. Logistic regression (LR)
print("Starting LR training...")
from sklearn.linear_model import LogisticRegression
start = time.time()
# penalty='l1' requires a solver that supports it (liblinear here; the default
# lbfgs in recent scikit-learn does not).
lr_clf = LogisticRegression(penalty='l1', C=0.2, solver='liblinear')
lr_clf.fit(x_train.astype("float"), y_train)
result = lr_clf.predict(x_test)
result = np.c_[range(1, len(result) + 1), result.astype(int)]
df_result = pd.DataFrame(result, columns=["ImageId", "Label"])
df_result.to_csv("../results_lr.csv", index=False)
end = time.time()
print("7. LR took %.2f s" % (end - start))

# 8. Random forest (RF)
# Random forests do not require feature normalization (though here they receive
# the PCA-reduced, scaled features from above).
# 8.1 Hyperparameter search (commented out; X_train_small/y_train_small is a
# subsample of the training data)
from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import GridSearchCV
# start = time.time()
# parameters = {'n_estimators': [10, 50, 100, 400], 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 5, 10]}
# rf_clf = RandomForestClassifier(n_estimators=400, n_jobs=4, verbose=1)
# gs_clf = GridSearchCV(rf_clf, parameters, n_jobs=1, verbose=True)
# gs_clf.fit(X_train_small.astype('int'), y_train_small)
# for mean_score, params in zip(gs_clf.cv_results_['mean_test_score'], gs_clf.cv_results_['params']):
#     print("%0.3f for %r" % (mean_score, params))
# elapsed = time.time() - start  # time.clock() was removed in Python 3.8
# print("Time used:", elapsed)
# 8.2 RF
print("Starting RF training...")
start = time.time()
rf_clf = RandomForestClassifier(n_estimators=400, n_jobs=4, verbose=1, criterion="gini", max_features=10)
rf_clf.fit(x_train.astype("float"), y_train)
result = rf_clf.predict(x_test)
result = np.c_[range(1, len(result) + 1), result.astype(int)]
df_result = pd.DataFrame(result, columns=['ImageId', 'Label'])
df_result.to_csv('../results_RF.csv', index=False)
end = time.time()
print("8. RF took %.2f s" % (end - start))

# 9. SVM
print("Starting SVM training...")
# 9.1 Hyperparameter search (commented out)
from sklearn.svm import SVC
# from sklearn.model_selection import GridSearchCV  # sklearn.grid_search was removed
# start = time.time()
# parameters = {'C': (1, 2, 5, 10, 50), 'gamma': [0.01, 0.02, 0.03, 0.04, 0.05]}
# svc_clf = SVC(kernel='rbf', verbose=True)
# gs_clf = GridSearchCV(svc_clf, parameters, n_jobs=1, verbose=True)
# gs_clf.fit(X_train_small.astype('float'), y_train_small)
# for mean_score, std, params in zip(gs_clf.cv_results_['mean_test_score'],
#                                    gs_clf.cv_results_['std_test_score'],
#                                    gs_clf.cv_results_['params']):
#     print("%0.3f (+/-%0.3f) for %r" % (mean_score, std * 2, params))
# elapsed = time.time() - start
# print("Time used:", elapsed)
# 9.2 SVM
start = time.time()
svc_clf = SVC(C=0.5, kernel='rbf', verbose=False, gamma=0.025)
svc_clf.fit(x_train.astype('float'), y_train)
result = svc_clf.predict(x_test)
result = np.c_[range(1, len(result) + 1), result.astype(int)]
df_result = pd.DataFrame(result, columns=['ImageId', 'Label'])
df_result.to_csv('../results_SVM.csv', index=False)
end = time.time()
print("9. SVM took %.2f s" % (end - start))
Run output:
C:\ProgramData\Anaconda2\envs\py36\python.exe C:/Users/hjz/AI/project/02_lianxi/01_DigitRecognizer/01_ML/DigitRecognizer_ML.py
1. Training set size: (42000, 785)
2. Number of components kept: 154
3. Training set size after PCA: (42000, 154)
4. Test set size after PCA: (28000, 154)
5. PCA took 6.79 s
Starting KNN training...
6. KNN took 256.17 s
Starting LR training...
7. LR took 42.33 s
Starting RF training...
[Parallel(n_jobs=4)]: Done  42 tasks      | elapsed:    5.8s
[Parallel(n_jobs=4)]: Done 192 tasks      | elapsed:   25.6s
[Parallel(n_jobs=4)]: Done 400 out of 400 | elapsed:   53.3s finished
[Parallel(n_jobs=4)]: Done  42 tasks      | elapsed:    0.0s
[Parallel(n_jobs=4)]: Done 192 tasks      | elapsed:    0.5s
[Parallel(n_jobs=4)]: Done 400 out of 400 | elapsed:    1.2s finished
8. RF took 55.12 s
Starting SVM training...
9. SVM took 534.41 s
Process finished with exit code 0
# -*- coding: utf-8 -*-
# XGBoost version
import xgboost as xgb
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np

def createDataSet():
    trainDataSet = pd.read_csv('../0_data/train.csv')
    testDataSet = pd.read_csv('../0_data/test.csv')
    trainDataSet = np.array(trainDataSet)
    testDataSet = np.array(testDataSet)
    trainData = trainDataSet[:, 1:]  # all pixel columns
    trainLabels = trainDataSet[:, 0]  # column 0 is the label
    testData = testDataSet
    return trainData, trainLabels, testData

def getPredict(datas, labels):
    x_train, x_test, y_train, y_test = train_test_split(datas, labels, test_size=0.1)
    param = {
        'booster': 'gbtree',
        'objective': 'multi:softmax',  # multiclass classification
        'num_class': 10,  # number of classes, used together with multi:softmax
        'gamma': 0.1,  # minimum loss reduction to split; larger is more conservative, typically 0.1-0.2
        'max_depth': 12,  # tree depth; deeper trees overfit more easily
        'lambda': 2,  # L2 regularization on leaf weights; larger values resist overfitting
        'subsample': 0.8,  # row subsampling per tree
        'colsample_bytree': 0.7,  # column subsampling per tree
        'min_child_weight': 5,
        # Defaults to 1: the minimum sum of instance hessians (h) in a leaf. For an
        # imbalanced 0-1 problem with h around 0.01, min_child_weight=1 means a leaf
        # needs at least 100 samples. This parameter strongly affects results;
        # smaller values make overfitting more likely.
        'silent': False,  # True suppresses training messages
        'learning_rate': 0.05,
        'seed': 1000
    }

    xgb_train = xgb.DMatrix(data=x_train, label=y_train)
    xgb_val = xgb.DMatrix(data=x_test, label=y_test)
    xgb_test = xgb.DMatrix(x_test)  # validation features without labels, for prediction below

    watchlist = [(xgb_train, 'train'), (xgb_val, 'val')]

    model = xgb.train(params=param,
                      dtrain=xgb_train,
                      num_boost_round=5000,  # upper bound on boosting rounds
                      evals=watchlist,
                      early_stopping_rounds=100  # stop if no improvement for 100 rounds
                      )
    print('best_ntree_limit:', model.best_ntree_limit)
    # Save the model
    model.save_model('1.model')
    y_pred = model.predict(xgb_test)
    print(accuracy_score(y_test, y_pred))
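    # Sketch (assumes the older xgboost predict API this script targets): restrict
    # prediction to the best early-stopping iteration instead of all rounds.
    # y_pred = model.predict(xgb_test, ntree_limit=model.best_ntree_limit)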

trainData, trainLabels, testData = createDataSet()
getPredict(trainData, trainLabels)
xgbPredict = xgb.DMatrix(testData)
# After training, the saved model can be reloaded
model = xgb.Booster()
model.load_model('1.model')
y_pred = model.predict(xgbPredict)
print(y_pred)
# Write the submission file
f = open('submission_xgb.csv', 'w', encoding='utf-8')
f.write('ImageId,Label\n')
for i in range(len(y_pred)):
    f.write(str(i + 1) + ',' + str(int(y_pred[i])) + '\n')
f.close()
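# Aside: an equivalent submission writer via pandas, matching the first script's
# style (sketch, my addition):
# submission = pd.DataFrame({'ImageId': np.arange(1, len(y_pred) + 1),
#                            'Label': y_pred.astype(int)})
# submission.to_csv('submission_xgb.csv', index=False)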
Results (Kaggle score and training time per model):

Model     Score     Time
KNN       0.97282   258.80 s
LR        0.88603   44.04 s
RF-pca    0.94782   55.12 s
SVM       0.88375   534.41 s
XGBoost   0.97282   ~2000 s
RF-only   0.96796   ~50 s