"""机器学习各种算法汇总模板

机器学习算法模板:包含 KNN、线性回归、逻辑回归、朴素贝叶斯、决策树、
支持向量机、随机森林、KMeans、集成算法。

各种算法、特征工程、评估方式任你选择!
"""

# 导包
# 数值计算 / 数据处理 / 绘图
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# 数据集
import sklearn.datasets as datasets
from sklearn.datasets import make_blobs

# 分类 / 回归 / 聚类模型
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import (
    RandomForestClassifier,
    BaggingClassifier,
    AdaBoostClassifier,
)
from sklearn.cluster import KMeans

# 特征工程
from sklearn.preprocessing import MinMaxScaler, StandardScaler, PolynomialFeatures
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import VarianceThreshold  # 下方 3.1 节特征过滤会用到(原文件漏导)
from sklearn.decomposition import PCA  # 下方 3.2 节 PCA 降维会用到(原文件漏导)

# 模型选择 / 调参
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV

# 评估指标
from sklearn.metrics import mean_squared_error as MSE
from sklearn.metrics import r2_score as R2
from sklearn.metrics import (
    accuracy_score,
    precision_score,  # 下方分类评估会用到(原文件漏导)
    recall_score,
    f1_score,
    roc_auc_score,
)
from sklearn.metrics import silhouette_score, silhouette_samples

# 样本不均衡处理
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler

# 决策树可视化
import graphviz

# #获取数据
# data = []
# data = datasets.load_iris()
# data = datasets.load_boston()  # 小规模数据集(注意:load_boston 在 sklearn 1.2 起已移除,可改用 fetch_california_housing)
# data = datasets.load_wine()
# data = datasets.load_breast_cancer()
# data = datasets.fetch_california_housing(data_home="./datasets")  # 大规模数据集
# data = pd.read_csv("")
# data = pd.read_excel("")  # 从本地读取文件

# 提取特征值, 查看特征确保特征是二维
# feature = data["data"]
# target = data["target"]
# print(feature.shape, target.shape)

# 如果不是二维,则增加维度
# feature = feature.reshape(())


# 特征工程:
# 1.特征值化:map映射,onehot编码;
# 1.1 onehot编码
# r1 = pd.get_dummies(df['color'],dtype = int)
# new_df = pd.concat((df,r1),axis=1).drop(columns='color')  # 按列拼接,再删除原始color列(drop无targets参数,应为columns)
# 1.2 map映射
# new_df['new_size'] = new_df['size'].map({'M':1,'L':2,'XL':3})  # map映射方法后重命名
# new_df = new_df.drop(columns='size')  # 删除原始size列(drop无targets参数,应为columns)

# 2.无量纲化:归一化,标准化
# 2.1归一化
# mm = MinMaxScaler() # 类实例化
# feature = mm.fit_transform(feature)  # 训练
# 2.2标准化
# ss = StandardScaler() # 类实例化
# feature = ss.fit_transform(feature)  # 训练

# 3.特征选择:过滤,降维
# 3.1 特征选择和过滤
# 文件读取 特征值选择
# data = pd.read_csv('cancer.csv',sep='\t')
# fea_col_index = data.columns[data.columns != 'Diagnosis']
# feature = data[fea_col_index].drop(columns='ID')
#
# #threshold:方差的阈值 实例化
# v = VarianceThreshold(threshold=0.2)
# result = v.fit_transform(feature)
# print(result.shape)
#
# # 特征方差中位数作为阈值
# import numpy as np
# median_value = np.median(feature.var(axis=0).values)
# v = VarianceThreshold(threshold=median_value)
# result = v.fit_transform(feature)
# print(result.shape)

# 3.2
# PCA 降维
# 将数据分解为较低维度的空间
# n_components可以为小数(保留特征的百分比),整数(减少到的特征数量)
# pca = PCA(n_components=2)
# res = pca.fit_transform([[0,2,4,3],[0,3,7,3],[0,9,6,3]])
# print(res)  #将(3,4)变成(3,2)

# 4.多项式回归(欠拟合增加特征数量)
# p2 = PolynomialFeatures(degree = 2,  include_bias = False)
# feature_2 = p2.fit_transform(feature)

# 5.岭回归(过拟合减少高次特征权重)
# r = Ridge(alpha = 9)
# r.fit(feature, target)
# print(r.coef_)

# 6.词频统计,中文文本划分

# 6.1多项式词频统计,基于TF-TDF特征值化(英文文本)
# tf = TfidfVectorizer()
# feature = tf.fit_transform(feature)
# #
# x_train, x_test, y_train, y_test = train_test_split(
#     feature, target, test_size=0.1, random_state=2021
# )
# 6.2 如果是中文文本,就要先用jieba对中文词语拆分
# text = [
#     '因为在自然语言处理中,我们是需要将一段中文文本中相关的词语,成语,形容词......都要进行抽取的',
#     '目前CountVectorizer只可以对有标点符号和用分隔符对应的文本进行特征抽取,显然这是满足不了我们日常需求的'
# ]
# new_text = []
# for t in text:
#     r = list(jieba.cut(t))  # 返回列表
#     s = ' '.join(r)  # 返回字符串
#     new_text.append(s)  # 返回一维列表
# result = tf.fit_transform(new_text)  # 对分词后的文本再做特征值化
# print(result.toarray())  # 返回数组

# 7.分箱
# 7.1等距分箱:cut

# # 连续性变量
# ages = np.random.randint(1, 80, size=(25,))
#
# # 使用分箱对连续性变量进行离散化
# # bins:分箱的个数
# cut_result = pd.cut(ages, bins=5, labels=list("abcde"))
# print(cut_result)
# print(cut_result.value_counts())
#
# # 7.2等频分箱qcut
# # 使用分箱对连续性变量进行离散化
# # q:分箱的个数
# qcut_result = pd.qcut(ages, q=5, labels=list("abcde"))
# print(qcut_result)
# print(qcut_result.value_counts())


# 8.样本类别分布不均衡处理
# 8.1使用smote合成少数过采样技术
# s = SMOTE(k_neighbors=3)
# feature, target = s.fit_resample(df[[0, 1, 2]], df[3])
# 训练完成之后返回特征和标签
# print(feature.shape, target.shape)

# 8.2欠采样技术
# from imblearn.under_sampling import RandomUnderSampler


# 划分数据集
# x_train, x_test, y_train, y_test = train_test_split(
#     feature, target, test_size=0.2, random_state=2021
# )

# 决策树调参优化(此参数网格为DecisionTree的超参数,供下方GridSearchCV使用):
# parameters = {'splitter':('best','random')
#               ,'criterion':("gini","entropy")
#               ,"max_depth":[*range(1,10)]
#               ,'min_samples_leaf':[*range(1,50,5)]
# }

# 模型训练,查看精度

# 0.已保存模型的加载

# 0.1 方式1
# import joblib
# model = joblib.load('xxx.model')  # 模型加载
# joblib.dump(model, 'xxx.model')  # 模型保存(需传入模型对象和文件名)  写在模型训练完成之后


# 0.2 方式2
# import pickle
# with open('../data/mm.pkl', 'rb') as f:
#     mm = pickle.load(f)  # 模型加载
# with open('../data/mm.pkl', 'wb') as f:
#     pickle.dump(mm, f)  # 模型保存  写在模型训练完成之后


# 1.分类模型训练

# 1.1KNN分类模型
# knn = KNeighborsClassifier(n_neighbors=75).fit(x_train, y_train)
# 训练完毕

# 1.2寻找最优k: 学习曲线&交叉验证
# ks = np.arange(3, 50)
# scores = []
# for k in ks:
#     knn = KNeighborsClassifier(n_neighbors=k)
#     score = cross_val_score(knn, x_train, y_train, cv = 5).mean()
#     scores.append(score)
# plt.plot(ks, scores, label="寻找最优K")
# plt.xlabel("k")
# plt.ylabel("score")
# # plt.show()
# best_k = ks[np.argmax(scores)]
# print(f"最优k是:{best_k}; 交叉验证最优精度:{max(scores)}")
# # 使用最优k训练模型
# knn = KNeighborsClassifier(n_neighbors=best_k).fit(x_train, y_train)
# # 1.1.1分类模型精度

# print(f"模型的分类结果{knn.predict(x_test)}")
# print(f"真实的分类结果{y_test}")
#
# # 分类模型评价指标
# y_pred = knn.predict(x_test)
# print(f"分类模型准确率:{knn.score(x_test, y_test)}")
# print(f"分类模型准确率:{accuracy_score(y_test, y_pred)}")
# print(f"分类模型召回率:{recall_score(y_test, y_pred, average = 'micro')}")
# print(f"分类模型精确率:{precision_score(y_test, y_pred, average = 'macro')}")
# print(f"分类模型精确率和召回率的调和平均数:{f1_score(y_test, y_pred, average = 'weighted')}")
# y_score = knn.predict_proba(x_test)  # 此处应使用本节训练好的knn模型(lre3在后文才定义)
# print(f"分类模型AUC曲线:{roc_auc_score(y_test, y_score, multi_class = 'ovr')}")


# # 1.2.1 逻辑回归(分类模型)
# # 1.2.1逻辑回归模型
# # 加入惩罚项(正则化方式),正则化力度(C),和求解器求解方式(坐标下降法)
# lre1 = LogisticRegression(penalty="l1", C=0.7, solver="liblinear").fit(x_train, y_train)
# lre2 = LogisticRegression(penalty="l2", C=0.7, solver="liblinear").fit(x_train, y_train)
# lre3 = LogisticRegression().fit(x_train, y_train)
# # 训练完毕
# y_pred = lre3.predict(x_test)
# print(f"分类模型准确率:{lre3.score(x_test, y_test)}")
# print(f"分类模型准确率:{accuracy_score(y_test, y_pred)}")
# print(f"分类模型召回率:{recall_score(y_test, y_pred, average = 'micro')}")
# print(f"分类模型精确率:{precision_score(y_test, y_pred, average = 'macro')}")
# print(f"分类模型精确率和召回率的调和平均数:{f1_score(y_test, y_pred, average = 'weighted')}")
# y_score = lre3.predict_proba(x_test)
# print(f"分类模型AUC曲线:{roc_auc_score(y_test, y_score, multi_class = 'ovr')}")


# 1.3.0 分类回归模型 : SVM
# 选择不同的核函数,计算最优超平面
# s1 = SVC(kernel='rbf').fit(x_train, y_train)
# s2 = SVC(kernel='poly').fit(x_train, y_train)
# s3 = SVC(kernel='linear').fit(x_train, y_train)
# s4 = SVC(kernel='sigmoid').fit(x_train, y_train)
# print('rbf:', f1_score(y_test, s1.predict(x_test)))
# print('poly:', f1_score(y_test, s2.predict(x_test)))
# print('linear:', f1_score(y_test, s3.predict(x_test)))
# print('sigmoid:', f1_score(y_test, s4.predict(x_test)))

# # 1.4.0 分类模型: 决策树
# tree = DecisionTreeClassifier(random_state=2021).fit(x_train, y_train)
# GS = GridSearchCV(tree, parameters, cv=5)
# GS.fit(x_train, y_train)
# # 训练完毕
#
# # 模型评估
# print(tree.score(x_test, y_test))
# print(f"参数调优的最优参数:{GS.best_params_}")
# print(f"参数调优的最优精度:{GS.best_score_}")
#
#
# # 使用graphviz画出决策树,决策树可视化:
# feature_name = ['酒精', '苹果酸', '灰', '灰的碱性', '镁', '总酚', '类黄酮', '非黄烷类酚类', '花青素', '颜色强度', '色调', 'od280/od315稀释葡萄酒', '脯氨酸']
# dot_data = export_graphviz(tree, out_file=None  # 图片保存路径
#                            , feature_names=feature_name, class_names=["琴酒", "雪莉", "贝尔摩德"], filled=True,  fontname='SimSun'  # 使用颜色表示分类结果
#                            )
# graph = graphviz.Source(dot_data, format="png")
# graph.view()
#
# print(tree.feature_importances_, tree)  # 每一个维度特征的重要性
#
# # 返回特征的重要性
# feature_name = ['酒精', '苹果酸', '灰', '灰的碱性', '镁', '总酚', '类黄酮', '非黄烷类酚类', '花青素', '颜 色强度', '色调', 'od280/od315稀释葡萄酒', '脯氨酸']
# print("特征的重要程度")
# print([*zip(feature_name, tree.feature_importances_)])


# # 1.5 分类模型: 随机森林集成算法/加入决策树算法比较
# rfc = RandomForestClassifier(n_estimators = 10, random_state=2023).fit(x_train, y_train)
# dtc = DecisionTreeClassifier(random_state=2023).fit(x_train, y_train)
# # rfc = RandomForestClassifier().fit(x_train, y_train)
# # 训练结束
#
# # 交叉验证
#
# # 模型精度评估
# print(f"随机森林模型精度:{rfc.score(x_test, y_test)}")
# print(f"决策树模型精度:{dtc.score(x_test, y_test)}")

# 集成算法:

# Bagging(装袋): 通过构建多个相互独立的基本模型,
# 然后将它们的预测结果进行平均或投票来提高整体模型的性能。
# Bagging 适用于高方差的模型,如决策树等,能够降低过拟合的风险。
#
# Boosting(提升): 通过串行地构建多个基本模型,
# 每个模型都在前一个模型的误差上进行学习,逐步提高整体模型的性能。
# Boosting 适用于低偏差的模型,如弱分类器或弱回归器,能够提升模型的预测能力。
#
# Stacking(堆叠): 通过将不同基本模型的预测结果作为特征,
# 再训练一个元模型来进行最终的预测。
# Stacking 适用于需要更高级别整合多个模型时,能够获得更好的泛化能力。


# # bagging (RandomForest)
# rfc_bagging = RandomForestClassifier(n_estimators = 10, random_state = 2023).fit(x_train, y_train)
# print(f"bagging装袋法模型精度:{rfc_bagging.score(x_test, y_test)}")
#
# # boosting(adaboost)
# base_classifier = DecisionTreeClassifier(max_depth=100, random_state=2023)
# ada_boosting = AdaBoostClassifier(base_estimator=base_classifier, n_estimators=10, random_state=2023).fit(x_train, y_train)  # 注意:sklearn 1.2+ 中 base_estimator 已改名为 estimator
# print(f"AdaBoost提升法模型精度:{ada_boosting.score(x_test, y_test)}")
#
# # Stacking(注意:此示例直接用测试集的预测结果和标签训练元模型,存在数据泄露;实际应使用交叉验证的折外预测)
# model1 = RandomForestClassifier(n_estimators=10, random_state=42).fit(x_train, y_train)
# model2 = DecisionTreeClassifier(max_depth=10, random_state=42).fit(x_train, y_train)
# pred1 = model1.predict(x_test)
# pred2 = model2.predict(x_test)
# x_stacked = np.column_stack((pred1, pred2))
# stacked_model = LogisticRegression().fit(x_stacked, y_test)
# print(f"Stacking 模型的准确率为:{stacked_model.score(x_stacked, y_test)}")


# 2回归模型训练

# 2.1线性回归模型
# linner = LinearRegression().fit(x_train, y_train)
# 训练完毕

# 2.1.1回归模型精度
# print(f"模型精度score:{linner.score(x_test,y_test)}")
#
# y_pred_test = linner.predict(x_test)
# print(f"模型精度R**2:   {R2(y_test, y_pred_test)}")
# R2:  1- 模型没有捕获到的信息量占真实标签中所带的信息量的比例
# R2越接近1越好

# y_pred_test = linner.predict(x_test)
# print(f"模型精度MSE:{MSE(y_test, y_pred_test)}")
# MSE:预测值和真实值之间的差异

# # 交叉验证得R2
# x1 = cross_val_score(linner, x_train, y_train, cv=5, scoring="r2").mean()
# x2 = cross_val_score(linner, x_test, y_test, cv=5, scoring="r2").mean()
# print(x1, x2)
# # 交叉验证得MSE
# x3 = cross_val_score(linner, x_train, y_train, cv=5, scoring='neg_mean_squared_error').mean()
# x4 = cross_val_score(linner, x_test, y_test, cv=5, scoring='neg_mean_squared_error').mean()
# print(x3, x4)

# 3. 概率模型训练
# 3.1 贝叶斯模型训练
# (多用于文档分类,先进行特征工程词频统计TfidfVectorizer)

# 3.1.1高斯模型 : 连续性特征
# g = GaussianNB().fit(x_train, y_train)
# 训练完毕

# # 贝叶斯模型预测类别和真实类别,最大概率分配到的标签类别
# print(f"概率模型预测标签类别{g.predict(x_test)}")
# print(f"概率模型真实标签类别{y_test}")
# # 3.1.1.1贝叶斯模型评分
# print(f"高斯贝叶斯模型的精度:{g.score(x_test, y_test)}")
#
# # 查看样本分到不同类别的概率
# print(g.predict_proba(x_test[20].reshape((1, -1))))
# # 概率的对数转化
# print(g.predict_log_proba(x_test[20].reshape((1, -1))))
#
#
# # 3.1.2 多项式模型: 主要适用于离散特征的概率计算
# # 3.1.2.1多项式项式模型训练和精度)
# m = MultinomialNB().fit(x_train, y_train)
# print(f"多项式贝叶斯模型的精度:{m.score(x_test, y_test)}")

# 10.1.1 无监督模型训练kmeans

# # 生成模拟数据
# X, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.6, random_state=0)
#
# # 创建 KMeans 模型并拟合数据
# kmeans = KMeans(n_clusters=4)
# kmeans.fit(X)
#
# # 预测簇标签
# labels = kmeans.labels_
#
# # 获取簇中心点
# centers = kmeans.cluster_centers_

# # 绘制数据点和簇中心点
# plt.figure(figsize=(8, 6))
# plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', s=50, alpha=0.7, edgecolors='k')
# plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='o', s=200, label='Cluster Centers')
# plt.title('KMeans Clustering')
# plt.xlabel('Feature 1')
# plt.ylabel('Feature 2')
# plt.legend()
# plt.show()
#
# # 计算整个数据集的轮廓系数
# score = silhouette_score(X, labels)
# print("整个数据集的轮廓系数:", score)
#
# # 计算每个样本的轮廓系数
# sample_silhouette_values = silhouette_samples(X, labels)
# print("每个样本的轮廓系数:", sample_silhouette_values)

  • 10
    点赞
  • 8
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值