from pyspark.ml.feature import HashingTF, IDF, Tokenizer
from pyspark.sql import SparkSession
# TF-IDF feature-extraction demo: tokenize -> hashed term frequencies -> IDF rescaling.
spark = SparkSession.builder.master("local").appName("Word Count333").getOrCreate()

# Toy corpus of (label, sentence) pairs; toDF names the auto-generated columns.
sentenceData = spark.createDataFrame([
    (1, "I heard about Spark and I love Spark"),
    (0, "I wish Java could use case classes"),
    (1, "Logistic regression models are neat")
]).toDF("label", "sentence")

# Split each sentence into a list of lowercase words.
tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
print(type(tokenizer))
wordsData = tokenizer.transform(sentenceData)
wordsData.show()

# Hash each word list into a fixed-size (2000-bucket) sparse term-frequency vector.
hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=2000)
featurizedData = hashingTF.transform(wordsData)
featurizedData.select("words", "rawFeatures").show(truncate=False)

# Fit IDF weights over the corpus, then rescale the raw term frequencies to TF-IDF.
idf = IDF(inputCol="rawFeatures", outputCol="features")
idfModel = idf.fit(featurizedData)
rescaledData = idfModel.transform(featurizedData)
rescaledData.select("features", "label").show(truncate=False)

# Release the local Spark session's resources (the original script leaked the session).
spark.stop()
之前在jupyter notebook上运行的结果为:
<class 'pyspark.ml.feature.Tokenizer'>
+-----+--------------------+--------------------+
|label| sentence| words|
+-----+--------------------+--------------------+
| 1|I heard about Spa...|[i, heard, about,...|
| 0|I wish Java could...|[i, wish, java, c...|
| 1|Logistic regressi...|[logistic, regres...|
+-----+--------------------+--------------------+
+---------------------------------------------+---------------------------------------------------------------------+
|words |rawFeatures |
+---------------------------------------------+---------------------------------------------------------------------+
|[i, heard, about, spark, and, i, love, spark]|(2000,[240,333,1105,1329,1357,1777],[1.0,1.0,2.0,2.0,1.0,1.0]) |
|[i, wish, java, could, use, case, classes] |(2000,[213,342,489,495,1329,1809,1967],[1.0,1.0,1.0,1.0,1.0,1.0,1.0])|
|[logistic, regression, models, are, neat] |(2000,[286,695,1138,1193,1604],[1.0,1.0,1.0,1.0,1.0]) |
+---------------------------------------------+---------------------------------------------------------------------+
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----+
|features |label|
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----+
|(2000,[240,333,1105,1329,1357,1777],[0.6931471805599453,0.6931471805599453,1.3862943611198906,0.5753641449035617,0.6931471805599453,0.6931471805599453]) |1 |
|(2000,[213,342,489,495,1329,1809,1967],[0.6931471805599453,0.6931471805599453,0.6931471805599453,0.6931471805599453,0.28768207245178085,0.6931471805599453,0.6931471805599453])|0 |
|(2000,[286,695,1138,1193,1604],[0.6931471805599453,0.6931471805599453,0.6931471805599453,0.6931471805599453,0.6931471805599453]) |1 |
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----+