Common Spark NLP methods

package com.bbw5.ml.spark

import org.apache.spark.{ SparkConf, SparkContext }
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{ CountVectorizer, CountVectorizerModel, HashingTF, IDF, StopWordsRemover, Tokenizer }
import org.apache.spark.sql.SQLContext
/**
 * Feature extraction and preprocessing for text with the Spark ML feature transformers.
 */
object FeatureExtractandPreprocess {
def main(args: Array[String]) {
val sparkConf = new SparkConf().setAppName("FeatureExtractandPreprocess")
val sc = new SparkContext(sparkConf)
val sqlContext = new SQLContext(sc)
nlpPipeline(sc, sqlContext)
sc.stop()
}
def nlpPipeline(sc: SparkContext, sqlContext: SQLContext) {
import sqlContext.implicits._
val documents = sc.textFile("G:/temp/data/documents.txt")
val df = documents.toDF("text")
df.show()
val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("tokens")
val remover = new StopWordsRemover().setInputCol("tokens").setOutputCol("words")
// an exact-count alternative to hashing (unused here; see countVectorizer below)
//val countVector = new CountVectorizer().setInputCol("words").setOutputCol("count_words").setVocabSize(20).setMinDF(1)
// 50 hash buckets is deliberately small for display; expect collisions on real vocabularies
val hashingTF = new HashingTF().setNumFeatures(50).setInputCol("words").setOutputCol("rawFeatures")
val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
val pipeline = new Pipeline().setStages(Array(tokenizer, remover, hashingTF, idf))
val model = pipeline.fit(df)
val tf = model.transform(df)
tf.show()
tf.select("words", "count_words").show(2, false)
tf.select("features").show(2, false)
}
/**
 * fruits.txt:
 * apple, banana, pear
 * apple, strawberry, mango, watermelon
 * strawberry, grape, melon
 * durian, mandarin, orange
 */
def countVectorizer(sc: SparkContext, sqlContext: SQLContext) {
import sqlContext.implicits._
val fruits = sc.textFile("G:/temp/data/fruits.txt")
val df = fruits.map { line => (0, line.split(",").map(_.trim.toLowerCase)) }.toDF("id", "words")
// fit a CountVectorizerModel from the corpus
val cvModel: CountVectorizerModel = new CountVectorizer().setInputCol("words").setOutputCol("features").setVocabSize(7).setMinDF(1).fit(df)
// alternatively, define a CountVectorizerModel with an a-priori vocabulary
val cvm = new CountVectorizerModel(Array("a", "b", "c")).setInputCol("words").setOutputCol("features")
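// cvm reuses the tiny vocabulary from the official Spark example; none of those
// terms occur in the fruit data, so the vectors it produces here are all zero
cvm.transform(df).select("features").show(2, false)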
println(cvModel.vocabulary.toList)
cvModel.transform(df).show()
// each cell in "features" is an ML Vector (a SparseVector for this data)
cvModel.transform(df).select("features").foreach { r => println(r.get(0).getClass()) }
}
def tokenizer(sqlContext: SQLContext) {
import org.apache.spark.ml.feature.{ RegexTokenizer, Tokenizer }
val sentenceDataFrame = sqlContext.createDataFrame(Seq(
(0, "Hi I heard about Spark"),
(1, "I wish Java could use case classes"),
(2, "Logistic,regression,models,are,neat"))).toDF("label", "sentence")
val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
val regexTokenizer = new RegexTokenizer()
.setInputCol("sentence")
.setOutputCol("words")
.setPattern("\\W") // alternatively .setPattern("\\w+").setGaps(false)
val tokenized = tokenizer.transform(sentenceDataFrame)
tokenized.select("words", "label").take(3).foreach(println)
val regexTokenized = regexTokenizer.transform(sentenceDataFrame)
regexTokenized.select("words", "label").take(3).foreach(println)
}
/**
* English stop words:
* http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
*/
def stopWords(sqlContext: SQLContext) {
import org.apache.spark.ml.feature.StopWordsRemover
val remover = new StopWordsRemover()
.setInputCol("raw")
.setOutputCol("filtered")
val dataSet = sqlContext.createDataFrame(Seq(
(0, Seq("I", "saw", "the", "red", "baloon")),
(1, Seq("Mary", "had", "a", "little", "lamb")))).toDF("id", "raw")
remover.transform(dataSet).show()
}
def stem(sqlContext: SQLContext) {
// TODO: Spark ML has no built-in stemming transformer; see the UDF sketch below
}
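/**
 * A minimal sketch of the usual workaround for the empty stem() above: Spark ML
 * ships no stemming transformer, so wrap a stemming function in a UDF. The
 * suffix rules below are a toy illustration only, not a real stemming
 * algorithm; in practice you would call a proper stemmer (e.g. a Snowball
 * implementation) inside the same UDF.
 */
def stemWithUDF(sqlContext: SQLContext) {
import org.apache.spark.sql.functions.udf
// toy stemmer: strips a few common English suffixes
val stemWord = (w: String) => w.toLowerCase match {
case s if s.endsWith("ies") => s.dropRight(3) + "y"
case s if s.endsWith("ing") && s.length > 5 => s.dropRight(3)
case s if s.endsWith("s") && !s.endsWith("ss") => s.dropRight(1)
case s => s
}
val stemUDF = udf { words: Seq[String] => words.map(stemWord) }
val dataSet = sqlContext.createDataFrame(Seq(
(0, Seq("studies", "running", "models", "class")))).toDF("id", "words")
dataSet.withColumn("stemmed", stemUDF(dataSet("words"))).show(false)
}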
def lemmatization(sqlContext: SQLContext) {
// TODO: no built-in lemmatizer in Spark ML either; see the note below
}
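// Lemmatization is normally handled like the stemming sketch above: wrap an
// external NLP library (e.g. Stanford CoreNLP's lemma annotator) in a UDF.
// A sketch is omitted here because it would pull in a heavyweight dependency.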
def tfidf(sqlContext: SQLContext) {
import org.apache.spark.ml.feature.{ HashingTF, IDF, Tokenizer }
val sentenceData = sqlContext.createDataFrame(Seq(
(0, "Hi I heard about Spark"),
(0, "I wish Java could use case classes"),
(1, "Logistic regression models are neat"))).toDF("label", "sentence")
val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
val wordsData = tokenizer.transform(sentenceData)
wordsData.select("words").show(2, false)
val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(10)
val featurizedData = hashingTF.transform(wordsData)
featurizedData.select("rawFeatures").show(2, false)
val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
val idfModel = idf.fit(featurizedData)
val rescaledData = idfModel.transform(featurizedData)
rescaledData.select("features", "label").take(3).foreach(println)
}
def word2vec(sqlContext: SQLContext) {
import org.apache.spark.ml.feature.Word2Vec
// Input data: Each row is a bag of words from a sentence or document.
val documentDF = sqlContext.createDataFrame(Seq(
"Hi I heard about Spark".split(" "),
"I wish Java could use case classes".split(" "),
"Logistic regression models are neat".split(" ")).map(Tuple1.apply)).toDF("text")
// Learn a mapping from words to Vectors.
val word2Vec = new Word2Vec()
.setInputCol("text")
.setOutputCol("result")
.setVectorSize(3)
.setMinCount(0)
val model = word2Vec.fit(documentDF)
val result = model.transform(documentDF)
result.select("result").take(3).foreach(println)
}
def nGram(sqlContext: SQLContext) {
import org.apache.spark.ml.feature.NGram
val wordDataFrame = sqlContext.createDataFrame(Seq(
(0, Array("Hi", "I", "heard", "about", "Spark")),
(1, Array("I", "wish", "Java", "could", "use", "case", "classes")),
(2, Array("Logistic", "regression", "models", "are", "neat")))).toDF("label", "words")
// NGram produces bigrams by default; call setN to change the window size
val ngram = new NGram().setInputCol("words").setOutputCol("ngrams")
val ngramDataFrame = ngram.transform(wordDataFrame)
// read the ngrams column back as a Seq; casting to Stream can fail at runtime
ngramDataFrame.take(3).map(_.getAs[Seq[String]]("ngrams").toList).foreach(println)
}
}