day 9.0 逻辑回归- 梯度下降

# max_iter caps the number of solver iterations; it is NOT a step size.
# A larger max_iter only allows the solver to run longer before being cut off;
# liblinear stops early on its own once the convergence tolerance is reached.
from sklearn.linear_model import LogisticRegression as LR
from sklearn.datasets import load_breast_cancer
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.feature_selection import SelectFromModel

# Load the breast-cancer dataset (binary classification: malignant vs. benign)
# and hold out 30% of the samples as a test set. The fixed random_state makes
# the split — and therefore the solver's iteration count — reproducible.
cancer = load_breast_cancer()
X, Y = cancer.data, cancer.target
# print(X.shape)
l2, l2test = [], []  # train/test accuracy accumulators for the commented study below
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.3, random_state=420)

# TODO: learning-curve study over max_iter (uncomment to run and plot)
# for i in np.arange(1, 201, 10):
#     lrl2 = LR(penalty='l2', solver="liblinear", C=0.8, max_iter=i)
#     lrl2 = lrl2.fit(Xtrain, Ytrain)
#     l2.append(accuracy_score(lrl2.predict(Xtrain), Ytrain))
#     l2test.append(accuracy_score(lrl2.predict(Xtest), Ytest))
# graph = [l2, l2test]
# color = ['black', 'gray']
# label = ['L2', 'L2test']
# plt.figure(figsize=(20, 5))
# for i in range(len(graph)):
#     plt.plot(np.arange(1, 201,10), graph[i], color[i], label=label[i])
# plt.legend(loc=4)
# plt.xticks(np.arange(1, 201, 10))
# plt.show()

# The fitted attribute n_iter_ reports how many iterations the solver actually performed in this fit.
# Fit with a generous iteration cap; liblinear converges well before the limit,
# so n_iter_ (the number of iterations actually performed) is far below 1000.
model = LR(penalty='l2', solver='liblinear', C=0.8, max_iter=1000)
model.fit(Xtrain, Ytrain)
print(model.n_iter_)  # [24]
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值