I. Splitting the dataset into a training set and an evaluation set
1. Split the data 2:1 into a training set and an evaluation set
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Column names for the iris CSV file (the file itself has no header row)
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
data = pd.read_csv(r'iris.csv', names=names)
array = data.values
X = array[:, 0:4]   # the four measurement columns are the features
Y = array[:, 4]     # the class column is the label
test_size = 0.33    # hold out one third of the samples, i.e. a 2:1 train/test ratio
seed = 7            # fix the random seed so the split is reproducible
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
model = LogisticRegression(max_iter=1000)  # a larger max_iter avoids convergence warnings
model.fit(X_train, Y_train)
result = model.score(X_test, Y_test)
print("Algorithm evaluation result: %.3f%%" % (result * 100))
Output:
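If the iris.csv file is not at hand, the same 2:1 hold-out evaluation can be run on the copy of the iris data bundled with scikit-learn. The following is only a minimal sketch of the idea above, assuming the bundled dataset (integer-encoded labels) instead of the CSV file; the split and scoring logic is unchanged.

# Minimal sketch: the same 2:1 hold-out split, but on the iris data that ships
# with scikit-learn instead of a local iris.csv file.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X, Y = iris.data, iris.target          # features and integer-encoded class labels

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=7)

model = LogisticRegression(max_iter=1000)
model.fit(X_train, Y_train)
print("Hold-out accuracy: %.3f%%" % (model.score(X_test, Y_test) * 100))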
2. Split the data at a 4:1 ratio
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
data = pd.read_csv(r'iris.csv', names=names)
array = data.values
X = array[:, 0:4]
Y = array[:, 4]
test_size = 0.2     # hold out 20% of the samples, i.e. a 4:1 train/test ratio
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
model = LogisticRegression(max_iter=1000)
model.fit(X_train, Y_train)
result = model.score(X_test, Y_test)
print("Algorithm evaluation result: %.3f%%" % (result * 100))
Output:
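A plain random 4:1 split can leave the classes slightly unbalanced between the two parts. train_test_split accepts a stratify argument that preserves the class proportions; the sketch below only illustrates that option, again assuming the bundled iris data rather than iris.csv.

# Minimal sketch: a stratified 4:1 split, so each class keeps the same
# proportion in the training and evaluation sets.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

iris = load_iris()
X, Y = iris.data, iris.target

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=7, stratify=Y)

# With 50 samples per class and test_size=0.2, each class contributes 10 test samples.
print("Test-set class counts:", np.bincount(Y_test))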
II. K-fold cross-validation
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression

names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
data = pd.read_csv(r'iris.csv', names=names)
array = data.values
X = array[:, 0:4]
Y = array[:, 4]
num_folds = 10
seed = 7
# shuffle=True is required when random_state is set; it also mixes the rows,
# which matters because the iris data is ordered by class
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
model = LogisticRegression(max_iter=1000)
result = cross_val_score(model, X, Y, cv=kfold)
print("Algorithm evaluation result: %.2f%% (%.2f%%)" % (result.mean() * 100, result.std() * 100))
Output:
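cross_val_score hides the per-fold work; the sketch below makes it explicit by iterating over the same 10-fold splitter, fitting and scoring the model once per fold. It is a minimal illustration only, again assuming the bundled iris data.

# Minimal sketch: essentially the loop that cross_val_score runs for 10-fold CV.
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X, Y = iris.data, iris.target

kfold = KFold(n_splits=10, shuffle=True, random_state=7)
model = LogisticRegression(max_iter=1000)

scores = []
for fold, (train_idx, test_idx) in enumerate(kfold.split(X), start=1):
    model.fit(X[train_idx], Y[train_idx])
    score = model.score(X[test_idx], Y[test_idx])
    scores.append(score)
    print("Fold %2d: %3d train / %2d test samples, accuracy %.3f" % (fold, len(train_idx), len(test_idx), score))

print("Mean accuracy: %.3f" % (sum(scores) / len(scores)))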
III. Leave-one-out cross-validation
import pandas as pd
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression

names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
data = pd.read_csv(r'iris.csv', names=names)
array = data.values
X = array[:, 0:4]
Y = array[:, 4]
# Leave-one-out: every sample is held out once, so the model is fitted len(X) times
loocv = LeaveOneOut()
model = LogisticRegression(max_iter=1000)
result = cross_val_score(model, X, Y, cv=loocv)
print("Algorithm evaluation result: %.2f%% (%.2f%%)" % (result.mean() * 100, result.std() * 100))
Output:
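Leave-one-out is K-fold taken to the extreme: K equals the number of samples, each test set contains a single row, and every individual score is either 0 or 1 (which is why the reported standard deviation is large). The sketch below only counts the required model fits, again assuming the bundled iris data.

# Minimal sketch: the cost of leave-one-out grows with the number of samples,
# because one model fit is needed per held-out sample.
from sklearn.datasets import load_iris
from sklearn.model_selection import LeaveOneOut

iris = load_iris()
X = iris.data

loocv = LeaveOneOut()
print("Samples in the dataset:", len(X))                 # 150 for iris
print("Model fits required   :", loocv.get_n_splits(X))  # also 150, one per sample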
IV. Repeated random train/test splits (ShuffleSplit)
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression

names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
data = pd.read_csv(r'iris.csv', names=names)
array = data.values
X = array[:, 0:4]
Y = array[:, 4]
n_splits = 10       # repeat the random split ten times
test_size = 0.33    # each repetition holds out one third of the samples
seed = 7
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
model = LogisticRegression(max_iter=1000)
result = cross_val_score(model, X, Y, cv=kfold)
print("Algorithm evaluation result: %.2f%% (%.2f%%)" % (result.mean() * 100, result.std() * 100))
Output:
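Unlike K-fold, ShuffleSplit draws each train/test split independently, so the test sets of different repetitions can overlap and some samples may never be tested at all. The sketch below, again a minimal illustration on the bundled iris data, prints the size and first few indices of each test set to make that visible.

# Minimal sketch: ShuffleSplit's repetitions are independent random draws,
# so their test sets can share samples.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import ShuffleSplit

iris = load_iris()
X = iris.data

ss = ShuffleSplit(n_splits=3, test_size=0.33, random_state=7)
for rep, (train_idx, test_idx) in enumerate(ss.split(X), start=1):
    print("Repetition %d: %d test samples, first indices %s" % (rep, len(test_idx), np.sort(test_idx)[:5]))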