# Environment: Python 2.7.10, win32
# 1. scikit-learn depends on the following Python packages:
#      python (>= 2.6)
#      NumPy  (>= 1.3)
#      SciPy  (>= 0.7)
# Check installed package versions with: pip list
import pandas as pd  # pandas: Excel ingestion and DataFrame handling
import numpy as np   # needed to stack the numpy arrays returned by train_test_split
# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB  # Gaussian Naive Bayes classifier
from sklearn.decomposition import PCA

"""
Load the data set.
"""
# read_excel returns a pandas DataFrame.
# NOTE(review): hard-coded absolute path -- assumes this file exists; confirm.
df = pd.read_excel('C:\\Users\\ihor\\Desktop\\totalData.xlsx')
x = df.iloc[:, 0:30]        # feature columns (30 features)
y = df.iloc[:, 30:31]       # class-label column
y1 = df.iloc[0:18, 30:31]   # class-1 labels (rows 0-17)
y2 = df.iloc[18:45, 30:31]  # class-2 labels (rows 18-44)

"""
PCA dimensionality reduction.
"""
# 'mle': select the number of retained components automatically (Minka's MLE).
pca = PCA(n_components='mle', copy=True)
# BUG FIX: the original transformed only the full matrix into newX1 (45 rows)
# yet paired it with the 18-row y1, and then referenced an undefined newX2
# (NameError). Fit PCA once on the full feature matrix so both classes live in
# the same PCA space, then slice the transformed rows per class.
newX = pca.fit_transform(x)
newX1 = newX[0:18, :]    # class-1 rows in PCA space
newX2 = newX[18:45, :]   # class-2 rows in PCA space
print(type(newX1))
#print("保留的成分个数:",pca.n_components_)
#print(pca.explained_variance_ratio_)  # per-component explained-variance ratio

"""
Train/test split.
"""
# Split each class separately so both classes appear in train and test.
# BUG FIX: test_size must be a float -- under Python 2, 3/10 evaluates to 0
# (integer division), silently yielding an empty test set. Use 0.3.
# random_state is fixed for reproducibility.
x_train1, x_test1, y_train1, y_test1 = train_test_split(
    newX1, y1, test_size=0.3, random_state=10)
x_train2, x_test2, y_train2, y_test2 = train_test_split(
    newX2, y2, test_size=0.3, random_state=10)
# BUG FIX: the X splits are numpy arrays (train_test_split of a numpy matrix),
# so pd.concat cannot stack them -- use np.vstack for features and keep
# pd.concat for the DataFrame labels.
x_train = np.vstack((x_train1, x_train2))
x_test = np.vstack((x_test1, x_test2))
y_train = pd.concat([y_train1, y_train2])
y_test = pd.concat([y_test1, y_test2])
print("测试集正确分类结果:")
print(y_test)

"""
Gaussian Naive Bayes classification.
"""
gnb = GaussianNB()
# fit trains the model; predict classifies the held-out rows.
# ravel() flattens the single-column label frame into the 1-D array sklearn expects.
y_pred = gnb.fit(x_train, y_train.values.ravel()).predict(x_test)
print("分类结果:", y_pred)
# Mean accuracy on the held-out test set.
accuracy = gnb.score(x_test, y_test.values.ravel())
print("准确度:", accuracy)
import pandas as pd  # pandas: Excel ingestion and DataFrame handling
# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB  # Gaussian Naive Bayes classifier
from sklearn.decomposition import PCA
import numpy as np

"""
Load the data set.
"""
# read_excel returns a pandas DataFrame.
# NOTE(review): hard-coded absolute path -- assumes this file exists; confirm.
df = pd.read_excel('C:\\Users\\ihor\\Desktop\\totalData.xlsx')
x = df.iloc[:, 0:30]        # feature columns (30 features)
y = df.iloc[:, 30:31]       # class-label column
#x1 = df.iloc[0:18,0:30]    # class-1 features (unused: PCA is fit on all rows)
y1 = df.iloc[0:18, 30:31]   # class-1 labels (rows 0-17)
#x2 = df.iloc[18:45,0:30]   # class-2 features (unused: PCA is fit on all rows)
y2 = df.iloc[18:45, 30:31]  # class-2 labels (rows 18-44)

"""
PCA dimensionality reduction.
"""
# 'mle': select the number of retained components automatically (Minka's MLE).
pca = PCA(n_components='mle', copy=True)
# Fit once on the full matrix so both classes share one PCA space,
# then slice the transformed rows per class.
newX = pca.fit_transform(x)
newX1 = newX[0:18, :]    # class-1 rows in PCA space
newX2 = newX[18:45, :]   # class-2 rows in PCA space
#print("保留的成分个数:",pca.n_components_)
#print(pca.explained_variance_ratio_)  # per-component explained-variance ratio

"""
Train/test split.
"""
# Split each class separately so both classes appear in train and test.
# BUG FIX: test_size must be a float -- under Python 2, 3/10 evaluates to 0
# (integer division), silently yielding an empty test set. Use 0.3.
# random_state is fixed for reproducibility.
x_train1, x_test1, y_train1, y_test1 = train_test_split(
    newX1, y1, test_size=0.3, random_state=0)
x_train2, x_test2, y_train2, y_test2 = train_test_split(
    newX2, y2, test_size=0.3, random_state=0)
# All splits are numpy arrays here (features) or DataFrames coerced by vstack
# into 2-D arrays (labels); vstack restores one combined train/test set each.
x_train = np.vstack((x_train1, x_train2))
x_test = np.vstack((x_test1, x_test2))
y_train = np.vstack((y_train1, y_train2))
y_test = np.vstack((y_test1, y_test2))
print("测试集正确分类结果:")
#print(y_test)

"""
Gaussian Naive Bayes classification.
"""
gnb = GaussianNB()
# fit trains the model; predict classifies the held-out rows.
# BUG FIX: y_train/y_test are (n, 1) column vectors after vstack; sklearn
# expects 1-D label arrays -- flatten with ravel() to avoid the shape warning.
y_pred = gnb.fit(x_train, y_train.ravel()).predict(x_test)
print("分类结果:", y_pred)
# Mean accuracy on the held-out test set.
accuracy = gnb.score(x_test, y_test.ravel())
print("准确度:", accuracy)