svm

import numpy as np
import xlrd as rd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, roc_auc_score
# Load the worksheet of per-subject measurements
data = rd.open_workbook('results_50.xls')
table = data.sheets()[0]
rows = table.nrows
print(rows)
datasets = np.empty((0, 2))   # feature matrix: volume, age
labels = np.empty((0, 1))     # class labels: 1 = AD, 0 = control
for i in range(1, rows):             # skip the header row
    data_list = table.row_values(i)  # col 0: name, col 1: volume, col 2: label, col 4: age
    if data_list[2] == 'MCI':        # MCI subjects are excluded from this binary task
        continue
    # AD -> 1, everything else (controls) -> 0
    label = 1 if data_list[2] == 'AD' else 0
    input_data = np.array([int(data_list[1]), data_list[4]])
    datasets = np.vstack((datasets, input_data))
    labels = np.vstack((labels, np.array([label])))
print(datasets.shape)
print(labels.shape)
labels = labels.ravel()

# Standardize the features: remove the mean and scale to unit variance
scaler = StandardScaler()
X = scaler.fit_transform(datasets)   # fit on the data, then transform it
C_list=[0.001,0.01,0.1,1,10,100,300]
gamma_list=[0.05,0.1,0.5,1,5,10]
kernel_list=['linear','rbf']
def randomtest():
    # Grid-search C, gamma and kernel on one random 80/20 split
    X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2)
    best_gamma = 0
    best_c = 0
    best_kernel = 'linear'
    best_result = 0
    for i in C_list:
        for j in gamma_list:
            for k in kernel_list:
                clf = SVC(C=i, gamma=j, kernel=k, probability=True)
                clf.fit(X_train, y_train)
                prediction = clf.predict(X_test)
                probability = clf.predict_proba(X_test)[:, 1].ravel()
                score = accuracy_score(y_test, prediction)
                # score = roc_auc_score(y_test, probability)  # alternative selection criterion
                if score > best_result:
                    best_result = score
                    best_c = i
                    best_gamma = j
                    best_kernel = k
    print("best split accuracy:", best_result)
    # Evaluate the selected hyperparameters with 5-fold cross-validation
    kf = KFold(n_splits=5)
    total_acc = 0
    total_auc = 0
    total_spe = 0   # specificity placeholder, never filled in (see the sketch after the script)
    total_sen = 0   # sensitivity placeholder, never filled in (see the sketch after the script)
    for train_index, test_index in kf.split(X):
        print("Train:", train_index, "Test:", test_index)
        X_train, y_train = X[train_index], labels[train_index]
        X_test, y_test = X[test_index], labels[test_index]
        clf = SVC(C=best_c, gamma=best_gamma, kernel=best_kernel, probability=True)
        clf.fit(X_train, y_train)
        prediction = clf.predict(X_test)
        proba = clf.predict_proba(X_test)[:, 1].ravel()
        total_acc += accuracy_score(y_test, prediction)
        total_auc += roc_auc_score(y_test, proba)
    print("acc:", total_acc / 5)
    print("auc:", total_auc / 5)
    # Return the fold-averaged scores (not just the last fold's values)
    return total_acc / 5, total_auc / 5
test_auc = 0
test_acc = 0

# Repeat the random split + cross-validation procedure 10 times and average the results
for i in range(10):
    ranacc, ranauc = randomtest()
    test_acc += ranacc
    test_auc += ranauc
print("last acc:", test_acc / 10)
print("last auc:", test_auc / 10)


