import pyspark
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
# Create a SparkSession named "mushrooms", running in local mode with 2 worker threads.
spark = (
    SparkSession.builder
    .appName("mushrooms")
    .master("local[2]")
    .getOrCreate()
)
# Load the mushrooms CSV (header row, comma-separated) into DataFrame df1.
# All columns are read as strings; they are index-encoded later.
df1 = spark.read.csv(
    "file:///home/ubuntu/Desktop/mushrooms.csv",
    header=True,
    sep=",",
)
# Preview the first 10 rows.
df1.show(10)
from pyspark.ml.feature import StringIndexer

# Encode the target column 'class' as a numeric 'label' column.
label_indexer = StringIndexer(inputCol='class', outputCol='label')
# Encode every remaining categorical column as a numeric index stored in a
# new column named new_<column>.
other_indexers = [
    StringIndexer(inputCol=column, outputCol=f"new_{column}") for column in df1.columns if column != 'class'
]
# Fit all indexers in a single pipeline and apply them to df1.
pipeline = Pipeline(stages=[label_indexer] + other_indexers)
# Named 'indexer_model' (not 'model') so a later model assignment in this
# script cannot silently shadow the fitted pipeline.
indexer_model = pipeline.fit(df1)
df2 = indexer_model.transform(df1)
# Preview the first 10 rows of the index-encoded DataFrame.
df2.show(10)
from pyspark.ml.feature import VectorAssembler

# Collect every index-encoded feature column (those prefixed with 'new_').
feature_cols = [c for c in df2.columns if c.startswith('new_')]
# Merge the feature columns into a single 'features' vector column and keep
# only the two columns the classifier needs.
assembler = VectorAssembler(inputCols=feature_cols, outputCol='features')
df3 = assembler.transform(df2).select('features', 'label')
# Preview the first 10 rows.
df3.show(10)
# Split the data 70/30 into training and test sets, seeded for reproducibility.
seed = 2023  # also reused below when building the random forest
train_ratio = 0.7
test_ratio = 1 - train_ratio
train_data, test_data = df3.randomSplit([train_ratio, test_ratio], seed=seed)
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# Random forest classifier; the input columns are named explicitly instead of
# relying on the 'features'/'label' defaults.
rf = RandomForestClassifier(featuresCol='features', labelCol='label',
                            numTrees=10, seed=seed)
# Fit on the training split and score the held-out test split.
# Named 'rf_model' to avoid reusing the bare name 'model', which this script
# also assigns to the fitted indexing pipeline earlier.
rf_model = rf.fit(train_data)
predictions = rf_model.transform(test_data)
# Compute test-set accuracy with a multiclass evaluator.
evaluator = MulticlassClassificationEvaluator(labelCol='label', predictionCol='prediction', metricName='accuracy')
accuracy = evaluator.evaluate(predictions)
print('Accuracy:', accuracy)
# Show predicted probabilities and predicted classes for the first two test samples.
predictions.show(2, truncate=False)
# Release cluster resources now that the script is finished — the original
# script never stopped the SparkSession.
spark.stop()