PyML(三)——用sklearn训练logistic模型

数据集为iris数据集,可以重点关注lr.coef_

# -*- coding: utf-8 -*-
# @Time    : 2018/7/19 10:53
# @Author  : Alan
# @Email   : xiezhengwen2013@163.com
# @File    : logistic_sk1.py
# @Software: PyCharm

from sklearn.linear_model import LogisticRegression
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from matplotlib.colors import ListedColormap

# Load iris and keep only petal length / petal width (columns 2 and 3)
# so the decision regions can be drawn in 2-D.
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target

# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

# Standardize: fit the scaler on the training data only, then apply the
# same transform to the test data (avoids information leakage).
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

# Stack train + test back together for plotting the full decision surface.
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined_std = np.hstack((y_train, y_test))

# Large C means a weak L2 penalty (C is the inverse regularization strength).
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the 2-D decision surface of a fitted classifier.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 2)
        Two-feature input matrix (standardized in this script).
    y : ndarray of shape (n_samples,)
        Integer class labels.
    classifier : object
        Fitted estimator exposing a ``predict`` method.
    test_idx : sequence of int, optional
        Row indices of ``X``/``y`` to highlight as the test set.
    resolution : float
        Grid step for the mesh on which the classifier is evaluated.
    """
    # Marker and color per class; colormap truncated to the class count.
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'cyan', 'gray')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Decision surface: predict on a dense grid spanning the data (+1 margin).
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # Scatter each class separately so it gets its own marker and legend entry.
    for idx, cl in enumerate(np.unique(y)):
        # FIX: pass the color name, not cmap(idx) — a single RGBA tuple for a
        # multi-point scatter is ambiguous and rejected by modern matplotlib.
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=colors[idx],
                    marker=markers[idx], label=cl)

    # Highlight the test samples as hollow circles.
    # FIX: explicit `is not None` so an empty index sequence is still honored.
    if test_idx is not None:
        X_test, y_test = X[test_idx, :], y[test_idx]
        # FIX: c='' is invalid in modern matplotlib; facecolors='none' with an
        # explicit edge color is the supported way to draw unfilled markers.
        plt.scatter(X_test[:, 0], X_test[:, 1],
                    facecolors='none', edgecolors='black',
                    alpha=1.0, linewidth=1, marker='o',
                    s=55, label='test set')

# Draw the decision regions over the combined (train + test) data;
# rows 105-149 of the stacked arrays are the 45 test samples.
plot_decision_regions(X_combined_std, y_combined_std,
                      classifier=lr, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

# Per-class membership probabilities and accuracy on the held-out test set.
print(lr.predict_proba(X_test_std))
print(lr.score(X_test_std, y_test))

# Regularization path: refit the model across ten orders of magnitude of C
# (inverse regularization strength) and record how the class-1 feature
# weights shrink as the L2 penalty grows (small C = strong penalty).
weights, params = [], []
for exponent in np.arange(-5, 5, dtype=float):
    C_value = 10 ** exponent
    lr = LogisticRegression(C=C_value, random_state=0)
    lr.fit(X_train_std, y_train)
    # lr.coef_ has shape (n_classes, n_features); row 1 holds the
    # weight vector for class 1.
    weights.append(lr.coef_[1])
    params.append(C_value)

weights = np.array(weights)
plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()

结果:

[[2.05743774e-11 6.31620264e-02 9.36837974e-01]
 [6.08753106e-04 9.99285569e-01 1.05678028e-04]
 [8.16962653e-01 1.83037347e-01 6.48299403e-14]
 [1.60866539e-11 4.14084479e-01 5.85915521e-01]
 [8.00231776e-01 1.99768224e-01 1.07576258e-13]
 [2.03283749e-13 1.38353563e-01 8.61646437e-01]
 [8.64726967e-01 1.35273033e-01 9.27705289e-14]
 [4.92680691e-07 7.61844645e-01 2.38154862e-01]
 [6.63326495e-07 8.43292211e-01 1.56707126e-01]
 [8.06447009e-05 9.97737477e-01 2.18187798e-03]
 [9.03984802e-09 4.62107297e-01 5.37892694e-01]
 [1.54213753e-06 8.75626483e-01 1.24371975e-01]
 [5.21240195e-06 9.81860065e-01 1.81347231e-02]
 [8.78337572e-07 8.24894420e-01 1.75104701e-01]
 [1.13609831e-06 8.90634747e-01 1.09364117e-01]
 [7.63277074e-01 2.36722926e-01 4.48028267e-14]
 [1.54213753e-06 8.75626483e-01 1.24371975e-01]
 [2.17883410e-05 9.95361393e-01 4.61681913e-03]
 [8.50297533e-01 1.49702467e-01 1.54539513e-13]
 [8.92312846e-01 1.07687154e-01 2.19265907e-13]
 [1.36185935e-09 1.37889641e-01 8.62110357e-01]
 [1.54213753e-06 8.75626483e-01 1.24371975e-01]
 [7.30041284e-01 2.69958716e-01 8.08556048e-13]
 [8.16962653e-01 1.83037347e-01 6.48299403e-14]
 [1.31849235e-08 2.33504838e-01 7.66495148e-01]
 [8.76597816e-01 1.23402184e-01 8.44713225e-15]
 [8.03965919e-01 1.96034081e-01 4.68344939e-12]
 [1.77840766e-05 9.92031267e-01 7.95094912e-03]
 [1.90628534e-02 9.80932392e-01 4.75462295e-06]
 [8.52060302e-01 1.47939698e-01 1.01852930e-12]
 [5.05675088e-10 3.05943312e-01 6.94056688e-01]
 [1.54213753e-06 8.75626483e-01 1.24371975e-01]
 [8.00231776e-01 1.99768224e-01 1.07576258e-13]
 [7.81877559e-09 2.32190155e-01 7.67809837e-01]
 [1.29757333e-11 1.74476679e-01 8.25523321e-01]
 [7.17765160e-05 9.95828460e-01 4.09976396e-03]
 [8.02288088e-01 1.97711912e-01 7.09140126e-13]
 [1.75748033e-08 3.82812098e-01 6.17187884e-01]
 [7.84804343e-06 9.60367415e-01 3.96247367e-02]
 [1.54998206e-04 9.99065670e-01 7.79331753e-04]
 [4.48901735e-11 1.93094174e-01 8.06905826e-01]
 [8.33068956e-01 1.66931044e-01 3.90241396e-14]
 [4.74332140e-11 8.17822383e-02 9.18217762e-01]
 [9.06082741e-01 9.39172591e-02 5.70722898e-12]
 [8.00231776e-01 1.99768224e-01 1.07576258e-13]]
0.9777777777777777

图片:

reference:

《python machine learning》

 

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值