下面列举了常见的机器学习算法的sklearn接口。
各算法示例按编号依次给出:线性回归、逻辑回归、KNN、SVM、朴素贝叶斯、决策树、K-means、随机森林、GBDT、PCA、xgboost。
1、LinearRegression
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression  # ordinary least squares

# Demo: fit a linear regression on the iris data (150 samples, 4 features).
iris = load_iris()
features, targets = iris.data, iris.target

# Hold out 20% of the samples for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    features, targets, test_size=0.2, random_state=2020)

# Train, predict, and evaluate.  For a regressor, score() is the R^2
# coefficient of determination.
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
r2 = regressor.score(X_test, y_test)

print(y_pred)
print(y_test)
print(r2)
2、LogisticRegression
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Demo: multinomial classification of iris with logistic regression.
iris = load_iris()
X, y = iris.data, iris.target
train_X, test_X, train_y, test_y = train_test_split(
    X, y, test_size=0.2, random_state=2020)

# max_iter raised from the default (100) so the lbfgs solver converges on
# iris without emitting a ConvergenceWarning.
model = LogisticRegression(max_iter=1000)
model.fit(train_X, train_y)

# predict() returns the predicted class labels.
predict_y = model.predict(test_X)
# predict_proba() would return an (n_samples, n_classes) array: entry (i, j)
# is the predicted probability that sample i belongs to class j, and each
# row sums to 1.
#predict_y_prob = model.predict_proba(test_X)

# For a classifier, score() is the mean accuracy (NOT an R^2 score).
score = model.score(test_X, test_y)
print(predict_y)
#print(predict_y_prob)
print(test_y)
print(score)
3、KNeighborsClassifier
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Demo: 1-nearest-neighbor classification of the iris dataset.
iris = load_iris()
X,y = iris.data, iris.target
train_X,test_X,train_y,test_y = train_test_split(X,y,test_size=0.2,random_state=2020)
model = KNeighborsClassifier(n_neighbors=1)  # classify by the single closest training sample
model.fit(train_X,train_y)
predict_y = model.predict(test_X) # predicted class labels for the test set
#predict_y_prob = model.predict_proba(test_X)# per-class probabilities: [P(class 0), P(class 1), P(class 2)]
score = model.score(test_X,test_y) # mean accuracy on the test set (not an R2 score)
print(predict_y)
#print(predict_y_prob)
print(test_y)
print(score)
4、SVM
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

# Demo: support-vector classification of the iris dataset.
bunch = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    bunch.data, bunch.target, test_size=0.2, random_state=2020)

clf = SVC()  # default RBF-kernel SVM
clf.fit(X_train, y_train)

y_hat = clf.predict(X_test)       # predicted class labels
acc = clf.score(X_test, y_test)   # mean accuracy

print(y_test)
print(y_hat)
print(acc)
5、naive_bayes
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# Demo: Gaussian naive Bayes classification of the iris dataset.
dataset = load_iris()
X_tr, X_te, y_tr, y_te = train_test_split(
    dataset.data, dataset.target, test_size=0.2, random_state=2020)

nb = GaussianNB()
nb.fit(X_tr, y_tr)

predictions = nb.predict(X_te)    # predicted class labels
accuracy = nb.score(X_te, y_te)   # mean accuracy

print(y_te)
print(predictions)
print(accuracy)
6、DecisionTree
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Demo: decision-tree classification of the iris dataset.
flowers = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    flowers.data, flowers.target, test_size=0.2, random_state=2020)

# Split nodes by Gini impurity.
tree = DecisionTreeClassifier(criterion='gini')
tree.fit(X_train, y_train)

y_pred = tree.predict(X_test)        # predicted class labels
accuracy = tree.score(X_test, y_test)  # mean accuracy

print(y_test)
print(y_pred)
print(accuracy)
7、K-means
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn import metrics

# Generate a synthetic 2-D clustering dataset: 1000 points around 4 centers.
X, y = make_blobs(n_samples=1000, n_features=2, centers=[[-1,-1], [0,0], [1,1], [2,2]], cluster_std=[0.4, 0.2, 0.2, 0.2],
random_state =9) # generate the clustering dataset
# Unsupervised setting: only the features are split (no labels passed in).
train_X,test_X = train_test_split(X,test_size=0.2,random_state=2020)

# Plot the held-out points before clustering.
plt.figure()
plt.scatter(test_X[:, 0], test_X[:, 1], marker='o')
plt.xlabel('before')
plt.show()

# NOTE(review): the data is generated from 4 centers but the model asks for
# 3 clusters — presumably intentional for the demo; confirm.
model = KMeans(n_clusters=3,random_state=2020)
model.fit(train_X) # unsupervised training (no labels)
predict_y = model.predict(test_X)  # cluster index assigned to each test point

# Plot the held-out points again, colored by assigned cluster.
plt.figure()
plt.scatter(test_X[:, 0], test_X[:, 1], c = predict_y,marker='o')
plt.xlabel('after')
plt.show()

# Calinski-Harabasz index: higher means denser, better-separated clusters.
score = metrics.calinski_harabasz_score(test_X, predict_y)
print(score)
8、RandomForest
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor  # regression variant (not used below)

# Demo: random-forest classification of the iris dataset.
iris_data = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris_data.data, iris_data.target, test_size=0.2, random_state=2020)

forest = RandomForestClassifier()
forest.fit(X_train, y_train)

y_hat = forest.predict(X_test)      # predicted class labels
acc = forest.score(X_test, y_test)  # mean accuracy

print(y_test)
print(y_hat)
print(acc)
9、GBDT
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier          # alternative booster (not used below)
from sklearn.ensemble import GradientBoostingRegressor  # regression variant (not used below)

# Demo: gradient-boosted decision trees (GBDT) on the iris dataset.
data = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.2, random_state=2020)

# 100 depth-1 trees (decision stumps) combined with a 0.1 learning rate.
gbdt = GradientBoostingClassifier(
    n_estimators=100, learning_rate=0.1, max_depth=1, random_state=2020)
gbdt.fit(X_train, y_train)

y_pred = gbdt.predict(X_test)          # predicted class labels
accuracy = gbdt.score(X_test, y_test)  # mean accuracy

print(y_test)
print(y_pred)
print(accuracy)
10、PCA
from sklearn.decomposition import PCA
from sklearn import datasets
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

# Generate 4 Gaussian blobs in 3-D space (10000 samples, 3 features).
X, y = datasets.make_blobs(n_samples=10000, n_features=3,
                           centers=[[0, 0, 0], [3, 3, 3], [1, 1, 1], [2, 2, 2]],
                           cluster_std=[0.2, 0.1, 0.2, 0.2], random_state=2020)

# Visualize the raw 3-D data on a true 3-D axes.
# Fixes two bugs in the original: (1) Axes3D(fig, ...) constructed directly is
# not attached to the figure on matplotlib >= 3.4, so the window was blank;
# (2) plt.scatter(X[:,0], X[:,1], X[:,2]) is a 2-D scatter that silently uses
# the third column as the marker *size* argument, not a z-coordinate.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.view_init(elev=30, azim=20)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], marker='o')
plt.show()

# To inspect how much variance each component explains, fit with different
# n_components and print the diagnostics:
# pca = PCA(n_components=3)     # keep all 3 components
# pca = PCA(n_components=2)     # keep the top 2 components
# pca = PCA(n_components=0.95)  # keep enough components for 95% of variance
# pca.fit(X)
# print(pca.explained_variance_ratio_)  # per-component variance ratio
# print(pca.explained_variance_)        # per-component variance

# Dimensionality reduction: project onto the top 2 principal components.
pca = PCA(n_components=2)
X_new = pca.fit_transform(X)  # one call instead of fit(X).transform(X)
print(X_new.shape)            # (10000, 2)

# New figure, so the 2-D projection is not drawn into the stale 3-D axes.
plt.figure()
plt.scatter(X_new[:, 0], X_new[:, 1])
plt.xlabel('Dimension1')
plt.ylabel('Dimension2')
plt.title('PCA of blobs')  # original said 'Iris', but the data is make_blobs
plt.show()
上面降维代码中 print(X_new.shape) 的输出为:(10000, 2)
11、xgboost
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from xgboost import plot_importance
import matplotlib.pyplot as plt

# Demo: gradient boosting via the xgboost sklearn-compatible wrapper.
bunch = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    bunch.data, bunch.target, test_size=0.2, random_state=2020)

booster = XGBClassifier()
booster.fit(X_train, y_train)

# Bar chart of the feature importances learned by the trained model.
plot_importance(booster, height=0.5, max_num_features=64)
plt.show()

y_hat = booster.predict(X_test)      # predicted class labels
acc = booster.score(X_test, y_test)  # mean accuracy

print(y_test)
print(y_hat)
print(acc)