Python常用工具库-实验(6个lab)-Scikit-learn

Scikit-learn

特征工程

from sklearn.preprocessing import MinMaxScaler  # 归一化API
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
# One-hot encoding via dictionary feature extraction.
data = [{'name': '张三', 'age': 20},
        {'name': '李四', 'age': 24},
        {'name': '王五', 'age': 18}]
# Instantiate the transformer; sparse=False yields a dense ndarray.
transfer = DictVectorizer(sparse=False)
# fit_transform learns the feature mapping and encodes the records.
data = transfer.fit_transform(data)
# Bug fix: the original print strings contained "¥n", a mojibake
# rendering of the newline escape "\n" (backslash shown as a yen sign).
print("返回的结果:\n", data)
print("特征名字:\n", transfer.get_feature_names_out())

# Demonstrate min-max normalisation and z-score standardisation on a
# tiny 3x3 DataFrame.
rows = [[30, 1, 5], [27, 3, 6], [34, 2, 6]]
l1, l2, l3 = rows
df = pd.DataFrame(rows, columns=["a", "b", "c"])
print(df)

# First rescale every column into [0, 1], then standardise every column
# to zero mean and unit variance, printing each result.
for scaler in (MinMaxScaler(feature_range=(0, 1)), StandardScaler()):
    transfer = scaler
    data = transfer.fit_transform(df[["a", "b", "c"]])
    print(data)

回归算法

from sklearn.datasets import load_boston # 导入数据波士顿房价
from sklearn.linear_model import SGDRegressor#线性回归
from sklearn.model_selection import train_test_split#划分数据集
from sklearn.preprocessing import StandardScaler#数据标准化
from sklearn.metrics import mean_squared_error # 均方误差

# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2; this script needs an older scikit-learn version — confirm.
# Bug fix: the original loaded the dataset twice; one call suffices.
boston = load_boston()
print("数据维度: ", boston.data.shape)   # (n_samples, n_features)
print("房价数据: ", boston.data)          # feature matrix
print("特征: ", boston.feature_names)     # feature names
print("标签: ", boston.target)            # target house prices

# Hold out 20% of the samples for testing; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    boston.data, boston.target, test_size=0.2, random_state=6)
print("训练集: ", x_train)
print("训练集维度:", x_train.shape)
print("测试集: ", x_test)
print("测试集维度: ", x_test.shape)

# Standardise features: fit on the training split only, then apply the
# same statistics to the test split to avoid data leakage.
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)
# Values change after standardisation but the shape does not.
print("标准化: ", x_train)
print('标准化后的维度: ', x_train.shape)

# Linear regression fitted via stochastic gradient descent.
estimator = SGDRegressor()
estimator.fit(x_train, y_train)

# Predict on the held-out test set.
y_predict = estimator.predict(x_test)
print(y_predict)

# Mean squared error between true and predicted prices.
error = mean_squared_error(y_test, y_predict)
print(error)

# Visualise predicted vs. true prices to inspect how far off the model is.
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 8))
plt.xlabel("x", fontsize=14)
plt.ylabel("y", fontsize=14)
plt.plot(range(len(y_test)), y_test, linestyle=':', marker='o', label="true")
plt.plot(range(len(y_test)), y_predict, linestyle=':', marker='o', label="predict")
plt.legend()
plt.show()


分类算法

泰坦尼克号数据csv:百度网盘
https://pan.baidu.com/s/1NlXV-_DRGiaZKY1sP4mfPQ?pwd=2333
提取码:2333

# train_test_split用来划分数据集
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
import dataclasses
import numpy as np
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
# 逻辑回归
from sklearn.linear_model import LogisticRegression

print("*********步骤1*********\n")
# The raw UCI file ships without a header row, so supply the names.
column_name = ['Sample code number', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',
               'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',
               'Normal Nucleoli', 'Mitoses', 'Class']
# If running on Huawei Cloud, download the dataset manually and upload it to ModelArts.
data = pd.read_csv(
    r"https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data",
    names=column_name,
)

# Missing values are encoded as '?'; map them to NaN and drop those rows.
data = data.replace(to_replace='?', value=np.nan).dropna()

# Features are columns 1..9; the label is the final 'Class' column.
x, y = data[column_name[1:10]], data[column_name[10]]

# 70/30 train/test split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# Standardise: fit on the training data, reuse its statistics on the test data.
std = StandardScaler()
x_train = std.fit_transform(x_train)
x_test = std.transform(x_test)

# Fit a logistic-regression classifier and report its learned weights.
lr = LogisticRegression()
lr.fit(x_train, y_train)
print("得出来的权重:", lr.coef_)

# Predicted classes for the test split.
print("预测的类别:", lr.predict(x_test))

# Accuracy on the test split.
print("预测的准确率:", lr.score(x_test, y_test))

print("*********步骤2*********\n")
iris = load_iris()

# Split into training/test features and targets (80/20, fixed seed).
x_train, x_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=22)
# Standardise using statistics learned from the training split only.
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)
# k-nearest-neighbours classifier with k = 9.
estimator = KNeighborsClassifier(n_neighbors=9)
estimator.fit(x_train, y_train)
# Evaluate on the held-out split.
# Bug fix: the original print strings used "¥n", a mojibake rendering
# of the newline escape "\n".
y_predict = estimator.predict(x_test)
print("预测结果为:\n", y_predict)
print("比对真实值和预测值:\n", y_predict == y_test)
score = estimator.score(x_test, y_test)
print("准确率为:\n", score)

print("*********步骤3*********\n")
# Titanic passenger data from a local CSV (the old Vanderbilt URL is dead).
# NOTE(review): hard-coded absolute Windows path — adjust for your machine.
# Bug fix: use a context manager so the file handle is closed
# (the original opened the file and never closed it).
with open(r'C:\Users\10560\Desktop\source\python作业\常用工具库-实验\Scikit-leaarn\train.csv') as f:
    titan = pd.read_csv(f)
# copy() so the imputation below edits our own frame rather than a view
# of `titan` (avoids pandas' SettingWithCopyWarning / silent no-op).
x = titan[['pclass', 'age', 'sex']].copy()
y = titan['survived']
# Impute missing ages with the column mean. Bug fix: the original used
# chained-assignment `x['age'].fillna(..., inplace=True)`, which is
# deprecated and may not modify x in modern pandas.
x['age'] = x['age'].fillna(x['age'].mean())
# Dictionary feature extraction one-hot encodes the categorical columns,
# e.g. [{"pclass": "1st", "age": 29.0, "sex": "female"}, ...].
# Renamed from `dict`, which shadowed the builtin.
vectorizer = DictVectorizer(sparse=False)
x = vectorizer.fit_transform(x.to_dict(orient="records"))
print(vectorizer.get_feature_names_out())
print(x)
# 70/30 train/test split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# Decision tree (entropy criterion), capped at depth 5 to limit overfitting.
dc = DecisionTreeClassifier(criterion='entropy', max_depth=5)
dc.fit(x_train, y_train)
print("预测的准确率为: ", dc.score(x_test, y_test))

聚类算法

import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import DBSCAN, KMeans
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data[:, :4]  # take all 4 feature dimensions
print(X.shape)


def _plot_clusters(features, labels):
    """Scatter-plot the first two feature columns for cluster labels 0-2.

    NOTE(review): only labels 0, 1 and 2 are drawn, matching the original
    script; DBSCAN noise points (label -1) are intentionally not shown.
    """
    # Bug fix: legend text 'labelO' (capital O) corrected to 'label0'.
    styles = [("red", 'o', 'label0'), ("green", '*', 'label1'), ("blue", '+', 'label2')]
    for lbl, (color, mark, name) in enumerate(styles):
        pts = features[labels == lbl]
        plt.scatter(pts[:, 0], pts[:, 1], c=color, marker=mark, label=name)
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.legend(loc=2)
    plt.show()


# Raw data distribution (sepal length vs. sepal width).
plt.scatter(X[:, 0], X[:, 1], c="red", marker='o', label='data')
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.legend(loc=2)
plt.show()

# K-means with 3 clusters.
estimator = KMeans(n_clusters=3)
estimator.fit(X)
label_pred = estimator.labels_   # cluster label per sample
_plot_clusters(X, label_pred)

# DBSCAN density-based clustering.
# (Fixed the copy-pasted comment that wrongly said "k-means" here; the
# duplicated plotting code is now shared via _plot_clusters.)
estimator = DBSCAN(eps=0.4, min_samples=4)
estimator.fit(X)
label_pred = estimator.labels_
_plot_clusters(X, label_pred)

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值