# Machine learning: classification algorithms in Python

from pandas import read_csv
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
# Dataset: Pima Indians Diabetes CSV (no header row, so column names are
# supplied explicitly). Last column 'class' is the binary target.
filename = 'pima_data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)

# Split into feature matrix X (first 8 columns) and target vector Y.
array = data.values
X = array[:, 0:8]
Y = array[:, 8]

# 10-fold cross-validation with a fixed seed for reproducibility.
# BUG FIX: shuffle=True is required when random_state is set — modern
# scikit-learn raises "Setting a random_state has no effect since
# shuffle is False" (ValueError) otherwise.
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)

# Logistic regression (linear algorithm)
# model = LogisticRegression()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Linear discriminant analysis (linear algorithm)
'''Projects high-dimensional samples onto an optimal discriminant vector
space, extracting class information while reducing feature dimensionality.'''
# model = LinearDiscriminantAnalysis()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Non-linear algorithms
# K-nearest neighbors
'''If the majority of the K most similar samples in feature space belong
to one class, the sample is assigned to that class.'''
# model = KNeighborsClassifier()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Naive Bayes classifier
'''Computes prior probabilities, then posterior probabilities via Bayes'
theorem; optimal in the minimum-error-rate sense.'''
# model = GaussianNB()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Classification and regression tree (CART)
# model = DecisionTreeClassifier()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Support vector machine (SVM)
# model = SVC()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())
