Linear Regression
First generate data from a quadratic function and fit a plain linear regression to it, to see where a straight line falls short.
import numpy as np
import matplotlib.pyplot as plt
x = np.random.uniform(-3, 3, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x**2 + x + 2 + np.random.normal(0, 1, 100)
X.shape
(100, 1)
X
array([[-0.89951778],
[ 2.50872999],
[ 2.4353359 ],
[-0.86763174],
[-2.72925348],
[-1.45719219],
[-0.54025571],
[-0.53760242],
[ 1.85970595],
[ 1.57230038],
[ 1.56990009],
[ 2.28305174],
[-2.59112168],
[-2.64334139],
[-2.76567062],
[ 0.68005198],
[ 0.33708501],
...
[-1.01140515],
[ 2.74215254],
[-2.73004737],
[-0.51508284]])
y
array([ 0.79647154, 8.88232529, 8.30787094, -0.05238261, 2.47342507,
1.9590172 , 0.49101351, 0.62394121, 5.72435864, 4.97987883,
5.3199278 , 7.58792764, 3.82694073, 2.11215395, 2.77798657,
3.69331054, 2.35618172, 3.97818497, 1.94798774, 8.45894137,
3.66989025, 3.99332214, 0.89851189, 0.73492175, 3.38890123,
5.19282411, 0.68377759, 1.61374746, 2.98310585, 0.59630839,
2.49700182, 8.92275413, 2.70647328, 10.61161191, 0.61439641,
1.86262655, 3.91924111, 2.741702 , 0.76651707, -0.55544964,
2.99670829, 3.48079184, 4.25653902, 1.21532141, 6.71560707,
3.39264928, 3.71641698, 8.00766354, 2.91326196, 2.09527705,
4.01907449, 1.2866292 , 2.45767153, 2.55749774, 2.0515609 ,
5.2849331 , 3.6588314 , 1.4383397 , 9.3591929 , 0.77173848,
1.08971906, 0.77206578, 3.36298872, 3.47117943, 3.62177969,
2.61081094, 1.28841343, 8.23299834, 6.39067367, 2.34850602,
2.50032685, 2.11977966, 1.68345708, 5.56751009, 1.8023396 ,
2.53554222, 10.5724512 , 1.52939331, 2.88805634, 2.7674932 ,
2.46133868, 2.15665785, 6.2066511 , 3.72760328, 6.19854463,
3.9359559 , 7.82418792, 1.5854534 , 2.13656437, 5.4053754 ,
5.77317218, 3.30323698, 5.0131409 , 3.84984313, 2.22021921,
3.08100348, 1.30739181, 9.26113351, 3.28547074, 1.739159 ])
x
array([-0.89951778, 2.50872999, 2.4353359 , -0.86763174, -2.72925348,
-1.45719219, -0.54025571, -0.53760242, 1.85970595, 1.57230038,
1.56990009, 2.28305174, -2.59112168, -2.64334139, -2.76567062,
0.68005198, 0.33708501, 0.08753101, -0.14196215, 2.35022604,
-2.88592106, 0.9784988 , -0.16498468, -0.83966014, 0.09322257,
1.6060534 , -1.90553413, -2.49284729, 0.13463158, -1.36955521,
0.84564178, 2.93781771, -2.69662918, 2.87649987, -0.19828651,
-0.84546172, -1.16323851, -1.30587637, -0.43285034, -1.195054 ,
-2.37222469, -2.59924062, -0.33995236, -1.16191355, 2.1116139 ,
-0.67849173, 1.49261461, 2.64106724, -1.12484179, 0.17088782,
-0.87039745, 0.03791021, 0.65267102, -0.07678643, -2.834112 ,
-2.98717084, -2.54342704, -0.46389904, 2.80043107, 0.15577566,
-0.59394665, 0.04709297, 0.72619359, -0.69563873, -0.98681543,
-2.8884292 , -0.10776948, 2.31101697, 2.47586736, -0.54017805,
0.68440894, -1.36355552, 0.55904675, 1.95387484, -2.22727105,
-1.78151621, 2.92648781, 0.74270975, 0.51638477, 0.49765519,
-1.21760458, 0.24396438, 2.18207184, 1.44166375, 1.50676197,
0.94095377, 2.4879327 , -2.74947577, -2.35523714, 2.17110775,
2.06786061, -2.08955051, 1.54528951, 1.57435462, -0.41458426,
0.12795122, -1.01140515, 2.74215254, -2.73004737, -0.51508284])
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X,y)
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)
y_predict = lin_reg.predict(X)
plt.scatter(x,y)
plt.plot(x,y_predict,color='r')
plt.show()
(X**2).shape
(100, 1)
X2 = np.hstack([X,X**2])
X2.shape
(100, 2)
lin_reg2= LinearRegression()
lin_reg2.fit(X2,y)
y_predict2 = lin_reg2.predict(X2)
plt.scatter(x,y)
plt.plot(np.sort(x),y_predict2[np.argsort(x)],color='r')
plt.show()
lin_reg2.coef_
array([1.04309307, 0.51881223])
lin_reg2.intercept_
2.085560841403531
The fitted coefficients (about 1 for x and 0.5 for x^2) and the intercept (about 2) recover the generating function y = 0.5x^2 + x + 2 up to noise.
import numpy as np
import matplotlib.pyplot as plt
x = np.random.uniform(-3, 3, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x**2 + x + 2 + np.random.normal(0, 1, 100)
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree = 2)
poly.fit(X)
X2 = poly.transform(X)
X[:5,:]
array([[-1.22238232],
[-2.7805248 ],
[-1.66953336],
[ 1.9736027 ],
[-0.44709313]])
X2[:5,:]
array([[ 1. , -1.22238232, 1.49421854],
[ 1. , -2.7805248 , 7.73131818],
[ 1. , -1.66953336, 2.78734164],
[ 1. , 1.9736027 , 3.89510764],
[ 1. , -0.44709313, 0.19989227]])
X[:5,:]**2
array([[1.49421854],
[7.73131818],
[2.78734164],
[3.89510764],
[0.19989227]])
from sklearn.linear_model import LinearRegression
lin_reg2 = LinearRegression()
lin_reg2.fit(X2,y)
y_predict2 = lin_reg2.predict(X2)
plt.scatter(x,y)
plt.plot(np.sort(x),y_predict2[np.argsort(x)],color='r')
plt.show()
lin_reg2.coef_
array([0.        , 0.97132627, 0.52604774])
lin_reg2.intercept_
2.106219901302948
The first coefficient belongs to the constant bias column that PolynomialFeatures adds; the model's own intercept absorbs it, so it is fitted as 0.
X = np.arange(1, 11).reshape(-1, 2)
X
array([[ 1, 2],
[ 3, 4],
[ 5, 6],
[ 7, 8],
[ 9, 10]])
poly = PolynomialFeatures(degree=2)
poly.fit(X)
X2 = poly.transform(X)
X2.shape
(5, 6)
X2
array([[  1.,   1.,   2.,   1.,   2.,   4.],
       [  1.,   3.,   4.,   9.,  12.,  16.],
       [  1.,   5.,   6.,  25.,  30.,  36.],
       [  1.,   7.,   8.,  49.,  56.,  64.],
       [  1.,   9.,  10.,  81.,  90., 100.]])
With two input features and degree=2, the six columns are 1, x1, x2, x1^2, x1*x2, x2^2.
x = np.random.uniform(-3, 3, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x**2 + x + 2 + np.random.normal(0, 1, 100)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
poly_reg = Pipeline([
("poly",PolynomialFeatures(degree=2)),
("std_scaler",StandardScaler()),
("lin_reg",LinearRegression())
])
poly_reg.fit(X,y)
y_predict = poly_reg.predict(X)
plt.scatter(x,y)
plt.plot(np.sort(x),y_predict[np.argsort(x)],color='r')
plt.show()
from sklearn.metrics import mean_squared_error
y_predict = lin_reg.predict(X)
mean_squared_error(y,y_predict)
3.4992559719694816
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
def PolynomialRegression(degree):
return Pipeline([
("poly", PolynomialFeatures(degree=degree)),
("std_scaler", StandardScaler()),
("lin_reg", LinearRegression())
])
poly2_reg = PolynomialRegression(degree=2)
poly2_reg.fit(X, y)
Pipeline(memory=None,
steps=[('poly',
PolynomialFeatures(degree=2, include_bias=True,
interaction_only=False, order='C')),
('std_scaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('lin_reg',
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,
normalize=False))],
verbose=False)
y2_predict = poly2_reg.predict(X)
mean_squared_error(y, y2_predict)
1.1365841098986502
plt.scatter(x, y)
plt.plot(np.sort(x), y2_predict[np.argsort(x)], color='r')
plt.show()
poly100_reg = PolynomialRegression(degree=100)
poly100_reg.fit(X, y)
y100_predict = poly100_reg.predict(X)
mean_squared_error(y, y100_predict)
0.6663717898573763
plt.scatter(x, y)
plt.plot(np.sort(x), y100_predict[np.argsort(x)], color='r')
plt.show()
X_plot = np.linspace(-3, 3, 100).reshape(100, 1)
y_plot = poly100_reg.predict(X_plot)
plt.scatter(x, y)
plt.plot(X_plot[:,0], y_plot, color='r')
plt.axis([-3, 3, 0, 10])
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
lin_reg = LinearRegression()
lin_reg.fit(X_train,y_train)
y_predict = lin_reg.predict(X_test)
mean_squared_error(y_test,y_predict)
3.104170213714221
poly2_reg = PolynomialRegression(degree=2)
poly2_reg.fit(X_train, y_train)
y2_predict = poly2_reg.predict(X_test)
mean_squared_error(y_test, y2_predict)
1.2217154614153332
poly10_reg = PolynomialRegression(degree=10)
poly10_reg.fit(X_train, y_train)
y10_predict = poly10_reg.predict(X_test)
mean_squared_error(y_test, y10_predict)
1.0900969891737575
poly100_reg = PolynomialRegression(degree=100)
poly100_reg.fit(X_train, y_train)
y100_predict = poly100_reg.predict(X_test)
mean_squared_error(y_test, y100_predict)
910991014982.5587
np.random.seed(666)
x = np.random.uniform(-3.0, 3.0, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x**2 + x + 2 + np.random.normal(0, 1, size=100)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
X_train.shape
(75, 1)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
train_score = []
test_score = []
for i in range(1, 76):
lin_reg = LinearRegression()
lin_reg.fit(X_train[:i], y_train[:i])
y_train_predict = lin_reg.predict(X_train[:i])
train_score.append(mean_squared_error(y_train[:i], y_train_predict))
y_test_predict = lin_reg.predict(X_test)
test_score.append(mean_squared_error(y_test, y_test_predict))
plt.plot([i for i in range(1, 76)], np.sqrt(train_score), label="train")
plt.plot([i for i in range(1, 76)], np.sqrt(test_score), label="test")
plt.legend()
plt.show()
def plot_learning_curve(algo, X_train, X_test, y_train, y_test):
train_score = []
test_score = []
for i in range(1, len(X_train)+1):
algo.fit(X_train[:i], y_train[:i])
y_train_predict = algo.predict(X_train[:i])
train_score.append(mean_squared_error(y_train[:i], y_train_predict))
y_test_predict = algo.predict(X_test)
test_score.append(mean_squared_error(y_test, y_test_predict))
plt.plot([i for i in range(1, len(X_train)+1)],
np.sqrt(train_score), label="train")
plt.plot([i for i in range(1, len(X_train)+1)],
np.sqrt(test_score), label="test")
plt.legend()
plt.axis([0, len(X_train)+1, 0, 4])
plt.show()
plot_learning_curve(LinearRegression(), X_train, X_test, y_train, y_test)
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def PolynomialRegression(degree):
return Pipeline([
("poly", PolynomialFeatures(degree=degree)),
("std_scaler", StandardScaler()),
("lin_reg", LinearRegression())
])
poly2_reg = PolynomialRegression(degree=2)
plot_learning_curve(poly2_reg, X_train, X_test, y_train, y_test)
poly20_reg = PolynomialRegression(degree=20)
plot_learning_curve(poly20_reg, X_train, X_test, y_train, y_test)
import numpy as np
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=666)
from sklearn.neighbors import KNeighborsClassifier
best_k, best_p, best_score = 0, 0, 0
for k in range(2, 11):
for p in range(1, 6):
knn_clf = KNeighborsClassifier(weights="distance", n_neighbors=k, p=p)
knn_clf.fit(X_train, y_train)
score = knn_clf.score(X_test, y_test)
if score > best_score:
best_k, best_p, best_score = k, p, score
print("Best K =", best_k)
print("Best P =", best_p)
print("Best Score =", best_score)
Best K = 3
Best P = 4
Best Score = 0.9860917941585535
Cross-Validation
from sklearn.model_selection import cross_val_score
knn_clf = KNeighborsClassifier()
cross_val_score(knn_clf, X_train, y_train)
array([0.98895028, 0.97777778, 0.96629213])
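The best parameters used below were presumably found by re-running the earlier grid search, but scoring each candidate with cross-validation on the training set only; a minimal sketch of that search:
best_k, best_p, best_score = 0, 0, 0
for k in range(2, 11):
    for p in range(1, 6):
        knn_clf = KNeighborsClassifier(weights="distance", n_neighbors=k, p=p)
        # mean cross-validated accuracy; the test set is never touched during selection
        score = np.mean(cross_val_score(knn_clf, X_train, y_train))
        if score > best_score:
            best_k, best_p, best_score = k, p, score
print("Best K =", best_k)
print("Best P =", best_p)
print("Best Score =", best_score)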
best_knn_clf = KNeighborsClassifier(weights="distance", n_neighbors=2, p=2)
best_knn_clf.fit(X_train, y_train)
best_knn_clf.score(X_test, y_test)
0.980528511821975
from sklearn.model_selection import GridSearchCV
param_grid = [
{
'weights': ['distance'],
'n_neighbors': [i for i in range(2, 11)],
'p': [i for i in range(1, 6)]
}
]
grid_search = GridSearchCV(knn_clf, param_grid, verbose=1)
grid_search.fit(X_train, y_train)
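After fitting, GridSearchCV exposes the winning configuration through its standard attributes; for example:
grid_search.best_score_      # best mean cross-validated accuracy
grid_search.best_params_     # the chosen n_neighbors, p and weights
best_knn_clf = grid_search.best_estimator_   # refitted on the whole training set
best_knn_clf.score(X_test, y_test)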
Ways to reduce high variance:
1. Lower the model's complexity (see the sketch after this list)
2. Reduce the data's dimensionality and denoise
3. Gather more training samples
4. Use a validation set
5. Regularize the model
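As a concrete illustration of the first remedy, compare the cross-validated error of a low-degree and a high-degree polynomial model. This is a sketch that regenerates the earlier quadratic data and reuses the PolynomialRegression helper defined above:
np.random.seed(666)
x = np.random.uniform(-3.0, 3.0, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x**2 + x + 2 + np.random.normal(0, 1, size=100)
for degree in [2, 20]:
    reg = PolynomialRegression(degree=degree)
    # sklearn reports negated MSE, so flip the sign back
    scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error")
    print(degree, -np.mean(scores))  # the simpler model should generalize better here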
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
x = np.random.uniform(-3.0, 3.0, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x + 3 + np.random.normal(0, 1, size=100)
plt.scatter(x, y)
plt.show()
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def PolynomialRegression(degree):
return Pipeline([
("poly", PolynomialFeatures(degree=degree)),
("std_scaler", StandardScaler()),
("lin_reg", LinearRegression())
])
np.random.seed(666)
X_train, X_test, y_train, y_test = train_test_split(X, y)
poly_reg = PolynomialRegression(degree=20)
poly_reg.fit(X_train, y_train)
y_poly_predict = poly_reg.predict(X_test)
mean_squared_error(y_test, y_poly_predict)
167.9401086890128
X_plot = np.linspace(-3, 3, 100).reshape(100, 1)
y_plot = poly_reg.predict(X_plot)
plt.scatter(x, y)
plt.plot(X_plot[:,0], y_plot, color='r')
plt.axis([-3, 3, 0, 6])
plt.show()
def plot_model(model):
X_plot = np.linspace(-3, 3, 100).reshape(100, 1)
y_plot = model.predict(X_plot)
plt.scatter(x, y)
plt.plot(X_plot[:,0], y_plot, color='r')
plt.axis([-3, 3, 0, 6])
plt.show()
plot_model(poly_reg)
Ridge Regression
from sklearn.linear_model import Ridge
def RidgeRegression(degree, alpha):
return Pipeline([
("poly", PolynomialFeatures(degree=degree)),
("std_scaler", StandardScaler()),
("ridge_reg", Ridge(alpha=alpha))
])
ridge1_reg = RidgeRegression(20, 0.0001)
ridge1_reg.fit(X_train, y_train)
y1_predict = ridge1_reg.predict(X_test)
mean_squared_error(y_test, y1_predict)
1.3233492754155471
plot_model(ridge1_reg)
ridge2_reg = RidgeRegression(20, 1)
ridge2_reg.fit(X_train, y_train)
y2_predict = ridge2_reg.predict(X_test)
mean_squared_error(y_test, y2_predict)
1.1888759304218448
plot_model(ridge2_reg)
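Pushing alpha further keeps shrinking the coefficients; with a very large alpha the fit approaches a horizontal line at the mean of y. A sketch following the same pattern:
ridge3_reg = RidgeRegression(20, 100)
ridge3_reg.fit(X_train, y_train)
mean_squared_error(y_test, ridge3_reg.predict(X_test))
plot_model(ridge3_reg)   # noticeably smoother curve
ridge4_reg = RidgeRegression(20, 10000000)
ridge4_reg.fit(X_train, y_train)
plot_model(ridge4_reg)   # essentially a flat line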
LASSO Regression
from sklearn.linear_model import Lasso
def LassoRegression(degree, alpha):
return Pipeline([
("poly", PolynomialFeatures(degree=degree)),
("std_scaler", StandardScaler()),
("lasso_reg", Lasso(alpha=alpha))
])
lasso1_reg = LassoRegression(20, 0.01)
lasso1_reg.fit(X_train, y_train)
y1_predict = lasso1_reg.predict(X_test)
mean_squared_error(y_test, y1_predict)
1.1496080843259964
plot_model(lasso1_reg)
lasso2_reg = LassoRegression(20, 0.1)
lasso2_reg.fit(X_train, y_train)
y2_predict = lasso2_reg.predict(X_test)
mean_squared_error(y_test, y2_predict)
1.1213911351818648
plot_model(lasso2_reg)
LASSO tends to drive some of the theta values to exactly 0, so it can also be used for feature selection.
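A quick way to see this sparsity: inspect the coefficients inside the fitted pipelines (named_steps is a standard Pipeline attribute):
lasso2_reg.named_steps["lasso_reg"].coef_   # many entries are exactly 0
ridge2_reg.named_steps["ridge_reg"].coef_   # shrunk toward 0, but rarely exactly 0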
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
X = np.random.normal(0, 1, size=(200, 2))
y = np.array((X[:,0]**2+X[:,1]**2)<1.5, dtype='int')
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
def PolynomialLogisticRegression(degree):
return Pipeline([
('poly', PolynomialFeatures(degree=degree)),
('std_scaler', StandardScaler()),
('log_reg', LogisticRegression())
])
poly_log_reg = PolynomialLogisticRegression(degree=2)
poly_log_reg.fit(X, y)
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
X = np.random.normal(0, 1, size=(200, 2))
y = np.array((X[:,0]**2+X[:,1])<1.5, dtype='int')
for _ in range(20):
    y[np.random.randint(200)] = 1  # flip some random labels to 1 to add noise
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, l1_ratio=None, max_iter=100,
multi_class='warn', n_jobs=None, penalty='l2',
random_state=None, solver='warn', tol=0.0001, verbose=0,
warm_start=False)
log_reg.score(X_train, y_train)
0.7933333333333333
log_reg.score(X_test, y_test)
0.86
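This linear boundary underfits the curved data. Applying the PolynomialLogisticRegression pipeline defined above should raise both scores (a sketch, values not shown):
poly_log_reg = PolynomialLogisticRegression(degree=2)
poly_log_reg.fit(X_train, y_train)
print(poly_log_reg.score(X_train, y_train))
print(poly_log_reg.score(X_test, y_test))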
OvR and OvO
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:,:2]
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(multi_class='ovr',solver='liblinear')
log_reg.fit(X_train, y_train)
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, l1_ratio=None, max_iter=100,
multi_class='ovr', n_jobs=None, penalty='l2',
random_state=None, solver='liblinear', tol=0.0001, verbose=0,
warm_start=False)
log_reg.score(X_test, y_test)
0.6578947368421053
log_reg2 = LogisticRegression(multi_class="multinomial", solver="newton-cg")
log_reg2.fit(X_train, y_train)
log_reg2.score(X_test, y_test)
0.7894736842105263
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
log_reg2 = LogisticRegression(multi_class="multinomial", solver="newton-cg")
log_reg2.fit(X_train, y_train)
log_reg2.score(X_test, y_test)
1.0
from sklearn.multiclass import OneVsRestClassifier
ovr = OneVsRestClassifier(log_reg)
ovr.fit(X_train, y_train)
ovr.score(X_test, y_test)
0.9473684210526315
import numpy as np
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target.copy()
y[digits.target==9] = 1
y[digits.target!=9] = 0
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
log_reg.score(X_test, y_test)
0.9755555555555555
y_log_predict = log_reg.predict(X_test)
def TN(y_true, y_predict):
assert len(y_true) == len(y_predict)
return np.sum((y_true == 0) & (y_predict == 0))
def FP(y_true, y_predict):
assert len(y_true) == len(y_predict)
return np.sum((y_true == 0) & (y_predict == 1))
def FN(y_true, y_predict):
assert len(y_true) == len(y_predict)
return np.sum((y_true == 1) & (y_predict == 0))
def TP(y_true, y_predict):
assert len(y_true) == len(y_predict)
return np.sum((y_true == 1) & (y_predict == 1))
print(TN(y_test, y_log_predict))
print(FP(y_test, y_log_predict))
print(FN(y_test, y_log_predict))
print(TP(y_test, y_log_predict))
403
2
9
36
def confusion_matrix(y_true, y_predict):
return np.array([
[TN(y_true, y_predict), FP(y_true, y_predict)],
[FN(y_true, y_predict), TP(y_true, y_predict)]
])
confusion_matrix(y_test, y_log_predict)
array([[403, 2],
[ 9, 36]])
def precision_score(y_true, y_predict):
tp = TP(y_true, y_predict)
fp = FP(y_true, y_predict)
try:
return tp / (tp + fp)
except:
return 0.0
def recall_score(y_true, y_predict):
tp = TP(y_true, y_predict)
fn = FN(y_true, y_predict)
try:
return tp / (tp + fn)
except:
return 0.0
precision_score(y_test, y_log_predict)
0.9473684210526315
recall_score(y_test, y_log_predict)
0.8
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_log_predict)
array([[403, 2],
[ 9, 36]])
from sklearn.metrics import precision_score
precision_score(y_test, y_log_predict)
0.9473684210526315
from sklearn.metrics import recall_score
recall_score(y_test, y_log_predict)
0.8
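Precision and recall are combined into a single metric by their harmonic mean, the F1 score; computing it by hand from the two values above:
precision = 0.9473684210526315
recall = 0.8
# the harmonic mean penalizes an imbalance between the two
f1 = 2 * precision * recall / (precision + recall)
f1   # ≈ 0.8675, matching sklearn's f1_score on these predictions below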
from sklearn.metrics import f1_score
y_pred = [0, 1, 1, 1, 2, 2]
y_true = [0, 1, 0, 2, 1, 1]
print(f1_score(y_true, y_pred, average='macro'))
print(f1_score(y_true, y_pred, average='weighted'))
0.3333333333333333
0.38888888888888884
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target.copy()
y[digits.target==9] = 1
y[digits.target!=9] = 0
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
y_predict = log_reg.predict(X_test)
from sklearn.metrics import f1_score
f1_score(y_test, y_predict)
0.8674698795180723
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_predict)
array([[403, 2],
[ 9, 36]])
from sklearn.metrics import precision_score
precision_score(y_test, y_predict)
0.9473684210526315
log_reg.decision_function(X_test)[:10]
array([-22.05699096, -33.02939331, -16.21333688, -80.37914686,
-48.25125897, -24.54005571, -44.39169519, -25.04292453,
-0.97829172, -19.71744304])
decision_scores = log_reg.decision_function(X_test)
print(np.min(decision_scores))
print(np.max(decision_scores))
-85.68607304951716
19.889580375580753
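Shifting the decision threshold away from the default of 0 trades recall for precision; for example, predicting the positive class only when the score exceeds 5 (a sketch using the scores above):
from sklearn.metrics import precision_score, recall_score
y_predict_2 = np.array(decision_scores >= 5, dtype='int')
print(precision_score(y_test, y_predict_2))   # precision should rise
print(recall_score(y_test, y_predict_2))      # recall should fall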
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target.copy()
y[digits.target==9] = 1
y[digits.target!=9] = 0
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
decision_scores = log_reg.decision_function(X_test)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
precisions = []
recalls = []
thresholds = np.arange(np.min(decision_scores), np.max(decision_scores), 0.1)
for threshold in thresholds:
y_predict = np.array(decision_scores >= threshold, dtype='int')
precisions.append(precision_score(y_test, y_predict))
recalls.append(recall_score(y_test, y_predict))
plt.plot(thresholds, precisions)
plt.plot(thresholds, recalls)
plt.show()
The Precision-Recall Curve
plt.plot(precisions, recalls)
plt.show()
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_test, decision_scores)
plt.plot(thresholds, precisions[:-1])
plt.plot(thresholds, recalls[:-1])
plt.show()
Note that precision_recall_curve returns precisions and recalls with one more element than thresholds (the last point, precision 1 and recall 0, has no threshold), hence the [:-1] when plotting against thresholds.
plt.plot(precisions, recalls)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:,2:]
y = iris.target
def split(X, y, d, value):
index_a = (X[:,d] <= value)
index_b = (X[:,d] > value)
return X[index_a], X[index_b], y[index_a], y[index_b]
from collections import Counter
from math import log
def entropy(y):
counter = Counter(y)
res = 0.0
for num in counter.values():
p = num / len(y)
res += -p * log(p)
return res
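As a sanity check, the full iris target has three equally frequent classes, so its entropy is -3 * (1/3) * log(1/3) = log 3:
entropy(y)   # ≈ 1.0986 = log(3)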
def try_split(X, y):
    best_entropy = float('inf')
    best_d, best_v = -1, -1
    for d in range(X.shape[1]):  # try every feature dimension
        sorted_index = np.argsort(X[:,d])
        for i in range(1, len(X)):  # candidate thresholds: midpoints of adjacent distinct values
            if X[sorted_index[i], d] != X[sorted_index[i-1], d]:
                v = (X[sorted_index[i], d] + X[sorted_index[i-1], d]) / 2
                X_l, X_r, y_l, y_r = split(X, y, d, v)
                e = entropy(y_l) + entropy(y_r)  # total entropy of the two branches
                if e < best_entropy:
                    best_entropy, best_d, best_v = e, d, v
    return best_entropy, best_d, best_v
best_entropy, best_d, best_v = try_split(X, y)
print("best_entropy =", best_entropy)
print("best_d =", best_d)
print("best_v =", best_v)
best_entropy = 0.6931471805599453
best_d = 0
best_v = 2.45
X1_l, X1_r, y1_l, y1_r = split(X, y, best_d, best_v)
print(entropy(y1_l))
0.0
entropy(y1_r)
0.6931471805599453
The left branch is pure, so its entropy is 0; the right branch still mixes the remaining two classes evenly, giving entropy log 2 ≈ 0.693. Their sum is exactly the best_entropy reported above.
best_entropy2, best_d2, best_v2 = try_split(X1_r, y1_r)
print("best_entropy =", best_entropy2)
print("best_d =", best_d2)
print("best_v =", best_v2)
best_entropy = 0.4132278899361904
best_d = 1
best_v = 1.75
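Applying this second split and recursing on any branch whose entropy is still above zero is how the full tree gets built; checking the two new branches:
X2_l, X2_r, y2_l, y2_r = split(X1_r, y1_r, best_d2, best_v2)
print(entropy(y2_l))   # low: mostly one class remains
print(entropy(y2_r))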