Multi-class text classification with PySpark

Original article: https://www.cnblogs.com/cymx66688/p/10699018.html
Original article: https://cloud.tencent.com/developer/article/1096712

"""
我们的任务,
是将旧金山犯罪记录(San Francisco Crime Description)分类到33个类目中。
输入:犯罪描述。
输出:类别。
"""
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.ml import Pipeline
from pyspark.ml.feature import RegexTokenizer, StopWordsRemover, CountVectorizer
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler
from pyspark.ml.feature import HashingTF, IDF
from pyspark.ml.classification import LogisticRegression, NaiveBayes, RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
import time


"""
from pyspark.sql.functions import col 总是出错,不能直接引用,所以变化一个
方法,使用,from pyspark.sql import functions as F
"""

# Load the CSV data directly with Spark's built-in csv reader:
spark = SparkSession.builder.getOrCreate()
data = spark.read.csv("C:/Users/35469/Desktop/data_files/sf_crime_data/train.csv",
                      header=True, inferSchema=True)
# Drop the columns we don't need and show the first five rows:
drop_list = ['Dates', 'DayOfWeek', 'PdDistrict', 'Resolution', 'Address', 'X', 'Y']
data = data.select([column for column in data.columns if column not in drop_list])
data.show(5)
# Use printSchema() to display the structure of the data:
data.printSchema()

# The 20 most frequent crime categories (show() prints 20 rows by default):
data.groupBy("Category").count().orderBy(F.col("count").desc())\
    .show()
# The 20 most frequent crime descriptions:
data.groupBy("Descript").count().orderBy(F.col("count").desc())\
    .show()

"""
RegexTokenizer:基于正则的方式进行文档切分成单词组
inputCol: 输入字段
outputCol: 输出字段
pattern: 匹配模式,根据匹配到的内容切分单词
CountVectorizer:构建词频向量
covabSize: 限制的词频数
minDF:如果是float,则表示出现的百分比小于minDF,不会被当做关键词
如果是int,则表示出现是次数小于minDF,不会被当做关键词
"""
# Tokenize with a regular expression
regexTokenizer = RegexTokenizer(inputCol="Descript", outputCol="words", pattern="\\W")  # note the uppercase W: \W matches non-word characters, which act as delimiters
# Remove stop words
add_stopwords = ["http", "https", "amp", "rt", "t", "c", "the"]
stopwordsRemover = StopWordsRemover(inputCol="words", outputCol="filtered").setStopWords(add_stopwords)
# Build term-frequency vectors
countVectors = CountVectorizer(inputCol="filtered", outputCol="features",
                               vocabSize=10000, minDF=5)

# Encode the label and assemble everything into a Pipeline
"""
StringIndexer encodes a column of string labels into a column of indices,
ordered by label frequency: the most frequent label gets index 0.
Here the labels are encoded as integers 0-32, with the most frequent category mapped to 0.
"""
label_stringIdx = StringIndexer(inputCol="Category", outputCol="label")
pipeline = Pipeline(stages=[regexTokenizer, stopwordsRemover, countVectors, label_stringIdx])
# Fit the pipeline to training documents.
pipelineFit = pipeline.fit(data)
dataset = pipelineFit.transform(data)
dataset.show(5)
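# As a quick sanity check, the fitted pipeline's stages can be inspected; a minimal
# sketch, assuming the stage order defined above (the CountVectorizer model sits at
# index 2 and the StringIndexer model at index 3):
# cv_model = pipelineFit.stages[2]
# print(cv_model.vocabulary[:10])    # the ten most frequent vocabulary terms
# label_model = pipelineFit.stages[3]
# print(label_model.labels[:5])      # label 0 is the most frequent category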
# Split the data into training and test sets
(trainingData, testData) = dataset.randomSplit([0.7, 0.3], seed=100)
# print("training dataset count:" + str(trainingData.count()))
# print("test dataset count:" + str(testData.count()))
#
# # Model training and evaluation
#
# # 4.1 Logistic regression on term-frequency features
# """LogisticRegression: logistic regression model
# maxIter: maximum number of iterations
# regParam: regularization strength
# elasticNetParam: ElasticNet mixing parameter; 0 = pure L2 penalty, 1 = pure L1 penalty
# """
# lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
# lrmodel = lr.fit(trainingData)
# predictions = lrmodel.transform(testData)
# # Filter to the rows predicted as class 0 (the most frequent category)
# predictions.filter(predictions['prediction'] == 0) \
#                    .select("Descript", "Category", "probability", "label", "prediction") \
#                    .orderBy("probability", ascending=False) \
#                    .show(n=10, truncate=30)
# # predictionCol: name of the column holding the predictions
# evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
# # Evaluate the predictions
# print(evaluator.evaluate(predictions))
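# # Note: MulticlassClassificationEvaluator scores F1 by default, not accuracy.
# # To report accuracy instead, pass metricName explicitly; a minimal sketch:
# evaluator = MulticlassClassificationEvaluator(predictionCol="prediction",
#                                               metricName="accuracy")
# print(evaluator.evaluate(predictions))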

# 4.2 Logistic regression on TF-IDF features

# start_time = time.time()
# hashingTF = HashingTF(inputCol="filtered", outputCol="rawFeatures", numFeatures=10000)
# idf = IDF(inputCol="rawFeatures", outputCol="features", minDocFreq=5)
# pipeline = Pipeline(stages=[regexTokenizer, stopwordsRemover, hashingTF, idf, label_stringIdx])
# pipe_fit = pipeline.fit(data)
# dataset = pipe_fit.transform(data)
# (trainingData, testData) = dataset.randomSplit([0.7, 0.3], seed=100)
# lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
# lr_model = lr.fit(trainingData)
# predictions = lr_model.transform(testData)
# predictions.filter(predictions['prediction'] == 0)\
#     .select('Descript', 'Category', 'probability', 'label', 'prediction') \
#     .orderBy('probability', ascending=False).show(n=10, truncate=30)
#
# # Evaluate the predictions
# evaluator = MulticlassClassificationEvaluator(predictionCol='prediction')
# print(evaluator.evaluate(predictions))  # 0.971667924
# end_time = time.time()
# print(end_time - start_time)
#
# 4.3 Cross-validation
# # Tune parameters with cross-validation, here for the logistic regression model built on term-frequency features
# start_time = time.time()
# pipeline = Pipeline(stages=[regexTokenizer, stopwordsRemover, countVectors, label_stringIdx])
# pipeline_fit = pipeline.fit(data)
# dataset = pipeline_fit.transform(data)
# (trainingData, testData) = dataset.randomSplit([0.7, 0.3], seed=100)
# lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
# # Create the parameter grid for cross-validation
# """
# ParamGridBuilder: builds a parameter grid for grid-search model selection
#     addGrid: adds a parameter together with the list of candidate values to search over
#     regParam: regularization strength
#     elasticNetParam: ElasticNet mixing parameter
#     maxIter: maximum number of iterations
# """
# paramGrid = (ParamGridBuilder()
#              .addGrid(lr.regParam, [0.1, 0.3, 0.5])
#              .addGrid(lr.elasticNetParam, [0.0, 0.1, 0.2])
#              .addGrid(lr.maxIter, [10, 20, 50])
#              .build()
#              )
#
# # # Create the five-fold cross-validation
# """  (This takes far too long to run; I ended up giving up on it...)
# estimator: the estimator to cross-validate
# estimatorParamMaps: the parameter grid to search over
# evaluator: the evaluator used to score each candidate
# numFolds: number of folds
# """
# evaluator = MulticlassClassificationEvaluator(predictionCol='prediction')
# cv = CrossValidator(estimator=lr,\
#                     estimatorParamMaps=paramGrid,\
#                     evaluator=evaluator,\
#                     numFolds=5)
# cv_model = cv.fit(trainingData)
# predictions = cv_model.transform(testData)
#
# # # Model evaluation
# # evaluator = MulticlassClassificationEvaluator(predictionCol='prediction')
# print(evaluator.evaluate(predictions))
# end_time = time.time()
# print(end_time-start_time)
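# # Once the fit completes, the averaged score per grid point and the best
# # parameter combination can be read off cv_model; a minimal sketch:
# best_idx = cv_model.avgMetrics.index(max(cv_model.avgMetrics))
# print(cv_model.getEstimatorParamMaps()[best_idx])  # best parameter combination
# print(cv_model.avgMetrics[best_idx])               # its cross-validated score
# best_model = cv_model.bestModel                    # already refit on the full training set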
#
# # 4.4 Naive Bayes (run on the term-frequency features; judging by the score, it does quite well)
# start_time = time.time()
# # # smoothing: Laplace smoothing parameter
# nb = NaiveBayes(smoothing=1)
# nb_model = nb.fit(trainingData)
# predictions = nb_model.transform(testData)
# predictions.filter(predictions['prediction'] == 0)\
#     .select('Descript', 'Category', 'probability', 'label', 'prediction')\
#     .orderBy('probability', ascending=False) \
#     .show(n=10, truncate=30)
#
# evaluator = MulticlassClassificationEvaluator(predictionCol='prediction')
# print(evaluator.evaluate(predictions))   # 0.993721
# end_time = time.time()
# print(end_time - start_time)
#
# 4.5 Random forest (run on the term-frequency features)
start_time = time.time()
"""
numTree:训练树的个数
maxDepth:最大深度
maxBins:连续特征离散化的最大分类数
"""
rf = RandomForestClassifier(labelCol='label',
                            featuresCol='features',
                            numTrees=100,
                            maxDepth=4,
                            maxBins=32)
rf_model = rf.fit(trainingData)
predictions = rf_model.transform(testData)
predictions.filter(predictions['prediction'] == 0)\
    .select('Descript', 'Category', 'probability', 'label', 'prediction')\
    .orderBy('probability', ascending=False)\
    .show(n=10, truncate=30)
"""
从结果上可以看出,随机森林虽然是通用的模型,但对于高维稀疏矩阵,结果并不是很好
"""
evaluator = MulticlassClassificationEvaluator(predictionCol='prediction')
print(evaluator.evaluate(predictions))  # 0.659789126
end_time = time.time()
print(end_time - start_time)
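
# The predictions carry numeric label indices. To map them back to the original
# category names, IndexToString can reuse the labels learned by the StringIndexer;
# a minimal sketch, assuming the pipeline fitted above (whose last stage is the
# StringIndexer model):
from pyspark.ml.feature import IndexToString

labelConverter = IndexToString(inputCol="prediction", outputCol="predictedCategory",
                               labels=pipelineFit.stages[-1].labels)
labelConverter.transform(predictions).select("Descript", "predictedCategory").show(n=5, truncate=30)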

 
