# Logistic regression
from pandas import read_csv
from sklearn.model_selection import ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# Pima Indians Diabetes dataset: headerless CSV, so column names are supplied.
filename = 'D:/0520代码+数据/第3、4次课:代码+数据/pima_data.csv'
names = ['preq', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]  # first 8 columns: features
Y = array[:, 8]    # last column: binary class label
n_splits = 10
test_size = 0.33
seed = 7
# Repeated random train/test splits (Monte-Carlo cross-validation).
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
# FIX: dropped multi_class='multinomial' — deprecated in scikit-learn >= 1.5,
# and the target here is binary, so the default solver behaviour is equivalent.
model = LogisticRegression(max_iter=3000)
result = cross_val_score(model, X, Y, cv=kfold)
print('算法评估: %.3f%% (%.3f%%)' % (result.mean() * 100, result.std() * 100))
# Linear Discriminant Analysis (LDA)
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score

# Load the headerless Pima CSV and split into feature matrix / label vector.
filename = 'D:/0520代码+数据/第3、4次课:代码+数据/pima_data.csv'
names = ['preq', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
values = data.values
X, Y = values[:, 0:8], values[:, 8]

num_fold = 10
seed = 7
# 10-fold CV with a fixed shuffle seed so the split is reproducible.
kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
model = LinearDiscriminantAnalysis()
print(cross_val_score(model, X, Y, cv=kfold).mean())
# K-Nearest Neighbours: a sample is assigned the class of its K most similar samples
from pandas import read_csv
# FIX: this section uses KFold below but imported ShuffleSplit, so it only
# worked because an earlier section happened to import KFold first.
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score

# Pima Indians Diabetes dataset (headerless CSV).
filename = 'D:/0520代码+数据/第3、4次课:代码+数据/pima_data.csv'
names = ['preq', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]  # features
Y = array[:, 8]    # binary class label
num_fold = 10
seed = 7
# 10-fold cross-validation, shuffled with a fixed seed for reproducibility.
kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
model = KNeighborsClassifier()
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())
# Naive Bayes classifier: probabilistic, assumes feature independence
from pandas import read_csv
# FIX: this section uses KFold below but imported ShuffleSplit; import KFold
# explicitly so the section stands on its own.
from sklearn.model_selection import KFold
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score

# Pima Indians Diabetes dataset (headerless CSV).
filename = 'D:/0520代码+数据/第3、4次课:代码+数据/pima_data.csv'
names = ['preq', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]  # features
Y = array[:, 8]    # binary class label
num_fold = 10
seed = 7
# 10-fold cross-validation, shuffled with a fixed seed for reproducibility.
kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
model = GaussianNB()
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())
# Classification and Regression Tree (CART): Gini-based binary splits,
# tree growing followed by pruning
from pandas import read_csv
# FIX: this section uses KFold below but imported ShuffleSplit; import KFold
# explicitly so the section stands on its own.
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score

# Pima Indians Diabetes dataset (headerless CSV).
filename = 'D:/0520代码+数据/第3、4次课:代码+数据/pima_data.csv'
names = ['preq', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]  # features
Y = array[:, 8]    # binary class label
num_fold = 10
seed = 7
# 10-fold cross-validation, shuffled with a fixed seed for reproducibility.
kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
model = DecisionTreeClassifier()
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())
# Support Vector Machine (SVM)
from pandas import read_csv
# FIX: this section uses KFold below but imported ShuffleSplit; import KFold
# explicitly so the section stands on its own.
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

# Pima Indians Diabetes dataset (headerless CSV).
filename = 'D:/0520代码+数据/第3、4次课:代码+数据/pima_data.csv'
names = ['preq', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]  # features
Y = array[:, 8]    # binary class label
num_fold = 10
seed = 7
# 10-fold cross-validation, shuffled with a fixed seed for reproducibility.
kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
model = SVC()
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())
# 机器学习之分类算法 (machine learning: classification algorithms)
# Article metadata from the original blog post — commented out because bare
# text is a SyntaxError in Python. Published 2024-10-04 05:34:26.