#load_breast_cancer
from sklearn.datasets import load_breast_cancer
cancer=load_breast_cancer()
len(cancer)
type(cancer)
cancer_data=cancer['data']
cancer_target=cancer['target']
cancer_names=cancer['feature_names']
cancer_desc=cancer['DESCR']
cancer_data.shape
cancer_target.shape
from sklearn.model_selection import train_test_split
cancer_data_train,cancer_data_test,cancer_target_train,cancer_target_test=\
train_test_split(cancer_data,cancer_target,test_size=0.2,random_state=42)
cancer_data_train.shape
cancer_data_test.shape
cancer_target_train.shape
cancer_target_test.shape
import numpy as np
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(cancer_data_train)
cancer_trainScaler=Scaler.transform(cancer_data_train)
cancer_testScaler=Scaler.transform(cancer_data_test)
np.min(cancer_data_train)
np.min(cancer_trainScaler)
np.max(cancer_data_train)
np.max(cancer_trainScaler)
np.min(cancer_data_test)
np.min(cancer_testScaler)
np.max(cancer_data_test)
np.max(cancer_testScaler)
from sklearn.decomposition import PCA
pca_model=PCA(n_components=10).fit(cancer_trainScaler)
cancer_trainPca=pca_model.transform(cancer_trainScaler)
cancer_testPca=pca_model.transform(cancer_testScaler)
cancer_trainScaler.shape
cancer_trainPca.shape
cancer_testScaler.shape
cancer_testPca.shape
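#Optional check: the fitted PCA model exposes explained_variance_ratio_, so a one-line
#sketch like the following shows how much variance the 10 retained components keep
print(pca_model.explained_variance_ratio_.sum())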
#load_boston (this loader was removed in scikit-learn 1.2, so this part needs an older version)
import numpy as np
from sklearn.datasets import load_boston
boston=load_boston()
boston_data=boston['data']
boston_target=boston['target']
boston_names=boston['feature_names']
boston_data.shape
boston_target.shape
boston_names.shape
from sklearn.model_selection import train_test_split
boston_data_train,boston_data_test,boston_target_train,boston_target_test=\
train_test_split(boston_data,boston_target,test_size=0.2,random_state=42)
boston_data_train.shape
boston_data_test.shape
boston_target_train.shape
boston_target_test.shape
from sklearn.preprocessing import StandardScaler
stdScale=StandardScaler().fit(boston_data_train)
boston_trainScaler=stdScale.transform(boston_data_train)
boston_testScaler=stdScale.transform(boston_data_test)
np.var(boston_trainScaler)
np.mean(boston_trainScaler)
np.var(boston_testScaler)
np.mean(boston_testScaler)
from sklearn.decomposition import PCA
pca=PCA(n_components=5).fit(boston_trainScaler)
boston_trainPca=pca.transform(boston_trainScaler)
boston_testPca=pca.transform(boston_testScaler)
boston_trainPca.shape
boston_testPca.shape
#Build and evaluate a clustering model
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
iris=load_iris()
iris_data=iris['data']
iris_target=iris['target']
scale=MinMaxScaler().fit(iris_data)
iris_dataScale=scale.transform(iris_data)
kmeans=KMeans(n_clusters=3,random_state=42).fit(iris_dataScale)
result=kmeans.predict([[1.5,1.5,1.5,1.5]])
result[0]
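#Note that the KMeans model was fit on MinMax-scaled data, so a new sample should be
#scaled with the same fitted scaler before predicting; a minimal sketch using the
#scale object from above (result_scaled is a new name introduced here):
result_scaled=kmeans.predict(scale.transform([[1.5,1.5,1.5,1.5]]))
result_scaled[0]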
#Visualize the clustering result
import pandas as pd
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne=TSNE(n_components=2,init='random',random_state=177).fit(iris_data)
df=pd.DataFrame(tsne.embedding_)
df['labels']=kmeans.labels_
df1=df[df['labels']==0]
df2=df[df['labels']==1]
df3=df[df['labels']==2]
fig=plt.figure(figsize=(9,6))
plt.plot(df1[0],df1[1],'bo',df2[0],df2[1],'r*',df3[0],df3[1],'gD')
plt.show()
#Evaluate the clustering model with the FMI (Fowlkes-Mallows index)
from sklearn.metrics import fowlkes_mallows_score
for i in range(2,7):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(iris_data)
    score=fowlkes_mallows_score(iris_target,kmeans.labels_)
    print('FMI score for iris data clustered into %d clusters: %f' %(i,score))
#Evaluate the clustering model with the silhouette coefficient
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
silhouetteScore=[]
for i in range(2,15):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(iris_data)
    score=silhouette_score(iris_data,kmeans.labels_)
    silhouetteScore.append(score)
plt.figure(figsize=(10,6))
plt.plot(range(2,15),silhouetteScore,linewidth=1.5,linestyle='-')
plt.show()
#Evaluate the clustering model with the Calinski-Harabasz index
from sklearn.metrics import calinski_harabasz_score  #spelled calinski_harabaz_score in older scikit-learn versions
for i in range(2,7):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(iris_data)
    score=calinski_harabasz_score(iris_data,kmeans.labels_)
    print('Calinski-Harabasz score for iris data clustered into %d clusters: %f' %(i,score))
#6-15 Cluster the seeds dataset with K-Means
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
seeds=pd.read_csv("C:\\data\\seeds_dataset.txt",sep='\t')
seeds_data=seeds.iloc[:,:7]
seeds_target=seeds.iloc[:,7]
stdScale=StandardScaler().fit(seeds_data)
seeds_dataScale=stdScale.transform(seeds_data)
kmeans=KMeans(n_clusters=3,random_state=42).fit(seeds_data)  #note: fit on the unscaled data; seeds_dataScale computed above is not used here
from sklearn.metrics import calinski_harabasz_score
for i in range(2,7):
    kmeans=KMeans(n_clusters=i,random_state=123).fit(seeds_data)
    score=calinski_harabasz_score(seeds_data,kmeans.labels_)
    print('Calinski-Harabasz score for seeds data clustered into %d clusters: %f' %(i,score))
#Build and evaluate a classification model
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
cancer=load_breast_cancer()
cancer_data=cancer['data']
cancer_target=cancer['target']
cancer_data_train,cancer_data_test,cancer_target_train,cancer_target_test=\
train_test_split(cancer_data,cancer_target,test_size=0.2,random_state=42)
stdScaler=StandardScaler().fit(cancer_data_train)
cancer_trainStd=stdScaler.transform(cancer_data_train)
cancer_testStd=stdScaler.transform(cancer_data_test)
svm=SVC().fit(cancer_trainStd,cancer_target_train)
cancer_target_pred=svm.predict(cancer_testStd)
true=np.sum(cancer_target_pred==cancer_target_test)  #number of correct predictions
cancer_target_test.shape[0]-true  #number of wrong predictions
true/cancer_target_test.shape[0]  #accuracy on the test set
#6-19 Evaluate the classification model
from sklearn.metrics import accuracy_score,precision_score,\
recall_score,f1_score,cohen_kappa_score
accuracy_score(cancer_target_test,cancer_target_pred)
precision_score(cancer_target_test,cancer_target_pred)
recall_score(cancer_target_test,cancer_target_pred)
f1_score(cancer_target_test,cancer_target_pred)
cohen_kappa_score(cancer_target_test,cancer_target_pred)
from sklearn.metrics import classification_report
print(classification_report(cancer_target_test,cancer_target_pred))
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
fpr,tpr,thresholds=roc_curve(cancer_target_test,cancer_target_pred)
plt.figure(figsize=(10,6))
plt.xlim(0,1)
plt.ylim(0.0,1.1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot(fpr,tpr)
plt.show()
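#The ROC curve above is built from hard 0/1 predictions, which yields only one interior
#point; continuous scores from decision_function give a smoother curve and a meaningful
#AUC. A minimal sketch (cancer_target_score is a new name introduced here):
from sklearn.metrics import roc_auc_score
cancer_target_score=svm.decision_function(cancer_testStd)
fpr,tpr,thresholds=roc_curve(cancer_target_test,cancer_target_score)
roc_auc_score(cancer_target_test,cancer_target_score)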
#6-22 Classify the abalone data with an SVM
import pandas as pd
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
abalone=pd.read_csv("C:\\data\\abalone.data",sep=',')
abalone_data=abalone.iloc[:,:8]
abalone_target=abalone.iloc[:,8]
sex=pd.get_dummies(abalone_data['sex'])
abalone_data=pd.concat([abalone_data,sex],axis=1)
abalone_data.drop('sex',axis=1,inplace=True)
abalone_data_train,abalone_data_test,abalone_target_train,abalone_target_test=\
train_test_split(abalone_data,abalone_target,train_size=0.8,random_state=42)
stdScaler=StandardScaler().fit(abalone_data_train)
abalone_trainStd=stdScaler.transform(abalone_data_train)
abalone_testStd=stdScaler.transform(abalone_data_test)
svm=SVC().fit(abalone_trainStd,abalone_target_train)
abalone_target_pred=svm.predict(abalone_testStd)
print(classification_report(abalone_target_test,abalone_target_pred))
#Build and evaluate a regression model (load_boston needs scikit-learn < 1.2)
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
boston=load_boston()
X=boston['data']
y=boston['target']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=125)
clf=LinearRegression().fit(X_train,y_train)
y_pred=clf.predict(X_test)
#Visualize the regression results
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.sans-serif']='SimHei'
fig=plt.figure(figsize=(10,6))
plt.plot(range(y_test.shape[0]),y_test,color='blue')
plt.plot(range(y_test.shape[0]),y_pred,color='red')
plt.xlim((0,102))
plt.ylim((0,55))
plt.legend(['true values','predicted values'])
plt.show()
from sklearn.metrics import explained_variance_score,mean_absolute_error,\
mean_squared_error,median_absolute_error,r2_score
mean_absolute_error(y_test,y_pred)
mean_squared_error(y_test,y_pred)
median_absolute_error(y_test,y_pred)
explained_variance_score(y_test,y_pred)
r2_score(y_test,y_pred)
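#RMSE (the square root of the MSE) is often easier to read because it is in the same
#units as the target; a one-line sketch:
import numpy as np
np.sqrt(mean_squared_error(y_test,y_pred))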
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
house=pd.read_csv("C:\\data\\cal_housing.data",sep=',',header=None)
house_data=house.iloc[:,:-1]
house_target=house.iloc[:,-1]
house_train,house_test,house_target_train,house_target_test=\
train_test_split(house_data,house_target,test_size=0.2,random_state=42)
GBR_house=GradientBoostingRegressor().fit(house_train,house_target_train)
house_target_pred=GBR_house.predict(house_test)
from sklearn.metrics import explained_variance_score,mean_absolute_error,\
mean_squared_error,median_absolute_error,r2_score
mean_absolute_error(house_target_test,house_target_pred)
mean_squared_error(house_target_test,house_target_pred)
median_absolute_error(house_target_test,house_target_pred)
explained_variance_score(house_target_test,house_target_pred)
r2_score(house_target_test,house_target_pred)
#Exercise 1: load and preprocess the wine and winequality data
import pandas as pd
from sklearn.model_selection import train_test_split
wine=pd.read_csv("C:\\data\\wine.csv",sep=',',encoding='gbk')
winequality=pd.read_csv("C:\\data\\winequality.csv",sep=';',encoding='gbk')
wine_data=wine.iloc[:,1:].values
wine_target=wine.iloc[:,0].values
winequality_data=winequality.iloc[:,:-1].values
winequality_target=winequality.iloc[:,-1].values
winequality_data_train,winequality_data_test,winequality_target_train,winequality_target_test=\
train_test_split(winequality_data,winequality_target,test_size=0.2,random_state=42)
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(wine_data)
wine_Scaler=Scaler.transform(wine_data)
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(winequality_data_train)
winequality_trainScaler=Scaler.transform(winequality_data_train)
from sklearn.decomposition import PCA
pca=PCA(n_components=7).fit(wine_Scaler)
wine_pca=pca.transform(wine_Scaler)
from sklearn.decomposition import PCA
pca=PCA(n_components=6).fit(winequality_trainScaler)
winequality_trainpca=pca.transform(winequality_trainScaler)
#Exercise 2: cluster the wine data and evaluate the result
from sklearn.cluster import KMeans
kmeans=KMeans(n_clusters=3,random_state=42).fit(wine_Scaler)
print(kmeans)
from sklearn.metrics import fowlkes_mallows_score
score=fowlkes_mallows_score(wine_target,kmeans.labels_)
print(score)
for i in range(2,11):
    kmeans=KMeans(n_clusters=i,random_state=42).fit(wine_Scaler)
    score=fowlkes_mallows_score(wine_target,kmeans.labels_)
    print('FMI score for wine data clustered into %d clusters: %f' %(i,score))
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
silhouetteScore=[]
for i in range(2,15):
    kmeans=KMeans(n_clusters=i,random_state=42).fit(wine_Scaler)
    score=silhouette_score(wine_Scaler,kmeans.labels_)
    silhouetteScore.append(score)
plt.figure(figsize=(10,6))
plt.plot(range(2,15),silhouetteScore,linewidth=1.5,linestyle="-")
plt.show()
from sklearn.metrics import calinski_harabasz_score
for i in range(2,11):
    kmeans=KMeans(n_clusters=i,random_state=42).fit(wine_Scaler)
    score=calinski_harabasz_score(wine_Scaler,kmeans.labels_)
    print('Calinski-Harabasz score for wine data clustered into %d clusters: %f' %(i,score))
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 26 08:23:34 2019
@author: Dell
"""
#Exercise 1: load and preprocess the wine and winequality data
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
wine=pd.read_csv("C:\\data\\wine.csv",sep=',',encoding='gbk')
winequality=pd.read_csv("C:\\data\\winequality.csv",sep=';',encoding='gbk')
wine_data=wine.iloc[:,1:]
wine_target=wine.iloc[:,0]
winequality_data=winequality.iloc[:,:-1]
winequality_target=winequality.iloc[:,-1]
wine_data_train,wine_data_test,wine_target_train,wine_target_test=\
train_test_split(wine_data,wine_target,test_size=0.2,random_state=42)
print(wine_data_train.shape)
print(wine_data_test.shape)
print(wine_target_train.shape)
print(wine_target_test.shape)
winequality_data_train,winequality_data_test,winequality_target_train,winequality_target_test=\
train_test_split(winequality_data,winequality_target,test_size=0.2,random_state=42)
print(winequality_data_train.shape)
print(winequality_data_test.shape)
print(winequality_target_train.shape)
print(winequality_target_test.shape)
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(wine_data_train)
wine_trainScaler=Scaler.transform(wine_data_train)
wine_testScaler=Scaler.transform(wine_data_test)
print(np.min(wine_trainScaler))
print(np.max(wine_trainScaler))
print(np.min(wine_testScaler))
print(np.max(wine_testScaler))
from sklearn.preprocessing import StandardScaler
stdScaler=StandardScaler().fit(wine_data_train)
wine_trainScaler=stdScaler.transform(wine_data_train)
wine_testScaler=stdScaler.transform(wine_data_test)
print(np.min(wine_trainScaler))
print(np.max(wine_trainScaler))
print(np.min(wine_testScaler))
print(np.max(wine_testScaler))
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(winequality_data_train)
winequality_trainScaler=Scaler.transform(winequality_data_train)
winequality_testScaler=Scaler.transform(winequality_data_test)
print(np.min(winequality_trainScaler))
print(np.max(winequality_trainScaler))
print(np.min(winequality_testScaler))
print(np.max(winequality_testScaler))
from sklearn.preprocessing import StandardScaler
stdScaler=StandardScaler().fit(winequality_data_train)
winequality_trainScaler=stdScaler.transform(winequality_data_train)
winequality_testScaler=stdScaler.transform(winequality_data_test)
print(np.min(winequality_trainScaler))
print(np.max(winequality_trainScaler))
print(np.min(winequality_testScaler))
print(np.max(winequality_testScaler))
from sklearn.decomposition import PCA
pca=PCA(n_components=5).fit(wine_trainScaler)
wine_trainpca=pca.transform(wine_trainScaler)
wine_testpca=pca.transform(wine_testScaler)
from sklearn.decomposition import PCA
pca=PCA(n_components=5).fit(winequality_trainScaler)
winequality_trainpca=pca.transform(winequality_trainScaler)
winequality_testpca=pca.transform(winequality_testScaler)
#Exercise 2: cluster the wine training data and evaluate the result
from sklearn.cluster import KMeans
kmeans=KMeans(n_clusters=3,random_state=42).fit(wine_trainScaler)
from sklearn.metrics import fowlkes_mallows_score
score=fowlkes_mallows_score(wine_target_train,kmeans.labels_)
print(score)
for i in range(2,11):
    kmeans=KMeans(n_clusters=i,random_state=42).fit(wine_trainScaler)
    score=fowlkes_mallows_score(wine_target_train,kmeans.labels_)
    print('FMI score for wine data clustered into %d clusters: %f' %(i,score))
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
silhouetteScore=[]
for i in range(2,15):
    kmeans=KMeans(n_clusters=i,random_state=42).fit(wine_trainScaler)
    score=silhouette_score(wine_trainScaler,kmeans.labels_)
    silhouetteScore.append(score)
plt.figure(figsize=(10,6))
plt.plot(range(2,15),silhouetteScore,linewidth=1.5,linestyle="-")
plt.show()
from sklearn.metrics import calinski_harabasz_score
for i in range(2,11):
    kmeans=KMeans(n_clusters=i,random_state=42).fit(wine_trainScaler)
    score=calinski_harabasz_score(wine_trainScaler,kmeans.labels_)
    print('Calinski-Harabasz score for wine data clustered into %d clusters: %f' %(i,score))
#Exercise 3: classify the wine data with an SVM
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
wine=pd.read_csv("C:\\data\\wine.csv",sep=',',encoding='gbk')
wine_data=wine.iloc[:,1:]
wine_target=wine.iloc[:,0]
wine_data_train,wine_data_test,wine_target_train,wine_target_test=\
train_test_split(wine_data,wine_target,test_size=0.2,random_state=42)
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(wine_data_train)
wine_trainScaler=Scaler.transform(wine_data_train)
wine_testScaler=Scaler.transform(wine_data_test)
from sklearn.svm import SVC
svm=SVC().fit(wine_trainScaler,wine_target_train)
wine_target_pred=svm.predict(wine_testScaler)
from sklearn.metrics import classification_report
print(classification_report(wine_target_test,wine_target_pred))
#Exercise 4: regression on the winequality data
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
winequality=pd.read_csv("C:\\data\\winequality.csv",sep=';',encoding='gbk')
winequality_data=winequality.iloc[:,:-1]
winequality_target=winequality.iloc[:,-1]
winequality_data_train,winequality_data_test,winequality_target_train,winequality_target_test=\
train_test_split(winequality_data,winequality_target,test_size=0.2,random_state=42)
from sklearn.preprocessing import MinMaxScaler
Scaler=MinMaxScaler().fit(winequality_data_train)
winequality_trainScaler=Scaler.transform(winequality_data_train)
winequality_testScaler=Scaler.transform(winequality_data_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error,mean_absolute_error,explained_variance_score
clf=LinearRegression().fit(winequality_trainScaler,winequality_target_train)
winequality_target_pred=clf.predict(winequality_testScaler)
mean_squared_error(winequality_target_test,winequality_target_pred)
mean_absolute_error(winequality_target_test,winequality_target_pred)
explained_variance_score(winequality_target_test,winequality_target_pred)
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error,mean_absolute_error,explained_variance_score
GBR=GradientBoostingRegressor().fit(winequality_trainScaler,winequality_target_train)
winequality_target_pred=GBR.predict(winequality_testScaler)
mean_squared_error(winequality_target_test,winequality_target_pred)
mean_absolute_error(winequality_target_test,winequality_target_pred)
explained_variance_score(winequality_target_test,winequality_target_pred)
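#r2_score (used earlier in this chapter) gives another view of the gradient boosting fit,
#and the same call works on the linear model's predictions for a direct comparison; a short sketch:
from sklearn.metrics import r2_score
r2_score(winequality_target_test,winequality_target_pred)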
#Practice problem 1
from sklearn.datasets import load_iris
iris=load_iris()
iris_data=iris['data']
iris_target=iris['target']
from sklearn.model_selection import train_test_split
iris_data_train,iris_data_test,iris_target_train,iris_target_test=\
train_test_split(iris_data,iris_target,test_size=0.2,random_state=42)
from sklearn.decomposition import PCA
pca=PCA(n_components=3).fit(iris_data_train)
iris_trainPca=pca.transform(iris_data_train)
iris_testPca=pca.transform(iris_data_test)
from sklearn.svm import SVC
svm=SVC().fit(iris_data_train,iris_target_train)  #note: the PCA-reduced features (iris_trainPca) computed above are not used here
iris_target_pred=svm.predict(iris_data_test)
from sklearn.metrics import accuracy_score,precision_score,recall_score,cohen_kappa_score,f1_score
accuracy_score(iris_target_test,iris_target_pred)
precision_score(iris_target_test,iris_target_pred,average='macro')  #iris is multiclass, so an averaging method is required
recall_score(iris_target_test,iris_target_pred,average='macro')
cohen_kappa_score(iris_target_test,iris_target_pred)
f1_score(iris_target_test,iris_target_pred,average='macro')
from sklearn.metrics import classification_report
print(classification_report(iris_target_test,iris_target_pred))
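#A confusion matrix makes it easy to see which iris classes get mixed up; a short sketch:
from sklearn.metrics import confusion_matrix
print(confusion_matrix(iris_target_test,iris_target_pred))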