Note: in sklearn, a classifier's score method returns accuracy, and it accepts an optional sample_weight argument, so the reported accuracy can be a per-sample weighted accuracy. Internally it is simply:
accuracy_score(y, self.predict(X), sample_weight=sample_weight)
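A minimal self-contained sketch of this equivalence (the dataset and the weights w are illustrative, not from the original):
import numpy as np
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

X, y = load_iris(return_X_y=True)
clf = KNeighborsClassifier().fit(X, y)
w = np.where(y == 0, 2.0, 1.0)  # illustrative per-sample weights
print(clf.score(X, y, sample_weight=w))                    # weighted accuracy via score
print(accuracy_score(y, clf.predict(X), sample_weight=w))  # the same value computed directly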
Loading data
from sklearn import datasets

iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target
# load_boston was removed in scikit-learn 1.2; on newer versions use
# datasets.fetch_california_housing() instead
loaded_data = datasets.load_boston()
data_X = loaded_data.data
data_y = loaded_data.target
X, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=10)  # synthetic regression data
X, y = datasets.make_classification(n_samples=300, n_features=2, n_redundant=0, n_informative=2,
                                    random_state=22, n_clusters_per_class=1, scale=100)  # synthetic classification data
Data preprocessing
from sklearn import preprocessing

## Standardize to zero mean and unit variance
scaler = preprocessing.StandardScaler().fit(X)  # learn the mean and standard deviation of X
X_scaled = scaler.transform(X)  # standardize X; the same fitted scaler can be applied to the test set
## Scale data into the [0, 1] range
min_max_scaler = preprocessing.MinMaxScaler()
X_minmax = min_max_scaler.fit_transform(X)
## Scale each sample to unit norm
X_normalized = preprocessing.normalize(X, norm='l2')
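To avoid information leaking from the test set, the scaler should be fitted on the training data only and then reused on the test data. A minimal self-contained sketch (X_demo and the split are illustrative, not from the original):
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

X_demo = np.random.RandomState(0).normal(10, 5, size=(100, 2))  # illustrative data
X_tr, X_te = train_test_split(X_demo, test_size=0.3, random_state=0)
scaler = preprocessing.StandardScaler().fit(X_tr)  # statistics come from the training split only
X_tr_std = scaler.transform(X_tr)
X_te_std = scaler.transform(X_te)  # reuse the training mean/std; never refit on the test set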
Splitting training and test sets
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    iris_X, iris_y, test_size=0.3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)  # fixed random_state gives a reproducible split
Model training
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVC

knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
model = LinearRegression()
model.fit(data_X, data_y)
clf = SVC()
clf.fit(X_train, y_train)
Model prediction
knn.predict(X_test)
model.predict(data_X[:4, :])  # predict on just the first few samples
Model evaluation
from sklearn import metrics

expected = y_test
predicted = clf.predict(X_test)
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
print(clf.score(X_test, y_test))  # mean accuracy on the test set
model.score(data_X, data_y)       # R^2 of the regression model
from sklearn.model_selection import cross_val_score

scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')  # one accuracy value per fold
print(scores)
print(scores.mean())
k_range = range(1, 31)  # assumed search range for the number of neighbors
k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    # 'mean_squared_error' was renamed 'neg_mean_squared_error' in scikit-learn 0.18;
    # the scorer returns negative MSE, so negate it to obtain a positive loss
    loss = -cross_val_score(knn, X, y, cv=10, scoring='neg_mean_squared_error')
    k_scores.append(loss.mean())

k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
    k_scores.append(scores.mean())
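A common follow-up, assumed here and not shown in the original, is to plot k_scores against k with matplotlib to pick the best number of neighbors:
import matplotlib.pyplot as plt

plt.plot(list(k_range), k_scores)
plt.xlabel('Value of k for KNN')
plt.ylabel('Cross-validated accuracy')
plt.show()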
GridSearchCV: exhaustive cross-validated search over a parameter grid.
RandomizedSearchCV: cross-validated search that samples a fixed number of parameter settings, cheaper when the grid is large (see the sketch below).
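Neither search is shown in the original; a minimal sketch of GridSearchCV for the KNN classifier above, with an illustrative parameter grid:
from sklearn.model_selection import GridSearchCV

param_grid = {'n_neighbors': list(range(1, 31)),
              'weights': ['uniform', 'distance']}  # illustrative grid
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)  # best setting and its cross-validated accuracy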
Saving the model
# method: joblib
# sklearn.externals.joblib was removed in scikit-learn 0.23; import joblib directly
import joblib
# Save
joblib.dump(clf, 'save/clf.pkl')
# Restore
clf3 = joblib.load('save/clf.pkl')
print(clf3.predict(X[0:1]))