Implementing Gaussian Naive Bayes in Spark
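Gaussian naive Bayes assumes the features are conditionally independent given the class, with each feature normally distributed within each class. The decision rule, which the code below implements directly, is

    \hat{y} = \arg\max_c \; P(c) \prod_{i=1}^{n} \mathcal{N}(x_i \mid \mu_{c,i}, \sigma^2_{c,i})

where P(c) is the class prior estimated from class frequencies, and \mu_{c,i}, \sigma^2_{c,i} are the mean and variance of feature i within class c. DataFrame aggregations estimate these parameters, and a UDF applies the rule.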

import breeze.stats.distributions.Gaussian
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg.DenseVector
import org.apache.spark.sql.SparkSession
import org.apache.spark.ml.stat.Summarizer.{
  mean => summaryMean,
  variance => summaryVar
}
import org.apache.spark.sql.functions.udf


object GaussianNaiveBayes {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName(s"${this.getClass.getSimpleName}")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    val sc = spark.sparkContext

    // Load the iris data
    val irisData = spark.read
      .option("header", true)
      .option("inferSchema", true)
      .csv("F:\\DataSource\\iris.csv")

    // rollup on class yields one count row per class plus a grand-total row (class = null)
    val classCounts = irisData.rollup($"class").count()

    // Total sample size, taken from the grand-total row
    val sampleSize = classCounts.where($"class".isNull).head().getAs[Long](1)

    // Prior probability of each class: class count / total count
    val pprobMap = classCounts
      .where($"class".isNotNull)
      .withColumn("pprob", $"count" / sampleSize)
      .collect()
      .map(row => row.getAs[String]("class") -> row.getAs[Double]("pprob"))
      .toMap
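    // Assuming the standard 150-row iris dataset (50 rows per class), each
    // prior in pprobMap comes out to 50.0 / 150 ≈ 0.333.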


    val schema = irisData.schema
    // Feature column names: every column except the label
    val fts = schema.filterNot(_.name == "class").map(_.name).toArray

    // Assemble the feature columns into a single vector column
    val featureAssembler: VectorAssembler = new VectorAssembler()
      .setInputCols(fts)
      .setOutputCol("features")

    val ftsDF = featureAssembler
      .transform(irisData)
      .select("class", "features")
 
    // Aggregate per class: mean vector and variance vector of the features
    val irisAggred = ftsDF
      .groupBy($"class")
      .agg(
        summaryMean($"features") as "mfts",
        summaryVar($"features") as "vfts"
      )
 

    // Collect the per-class Gaussian parameters: one (mu, sigma^2) pair per feature
    val cprobs: Array[(Array[(Double, Double)], String)] = irisAggred
      .collect()
      .map(row => {
        val cl = row.getAs[String]("class")
        val mus = row.getAs[DenseVector]("mfts").toArray
        val vars = row.getAs[DenseVector]("vfts").toArray
        (mus.zip(vars), cl)
      })
 

    def pdf(x: Double, mu: Double, sigma2: Double) = {
      Gaussian(mu, math.sqrt(sigma2)).pdf(x)
    }
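    // pdf evaluates the normal density
    //   N(x; mu, sigma^2) = exp(-(x - mu)^2 / (2 * sigma2)) / sqrt(2 * pi * sigma2).
    // breeze's Gaussian is parameterized by the standard deviation, hence math.sqrt(sigma2).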

    // For each class: posterior ∝ prior × product of per-feature densities;
    // predict the class with the largest posterior
    val predictUDF = udf((vec: DenseVector) => {
      cprobs
        .map { case (params, cl) =>
          val likelihood = params.zip(vec.toArray).map {
            case ((mu, sigma2), x) => pdf(x, mu, sigma2)
          }.product
          (likelihood * pprobMap.getOrElse(cl, 0.0), cl)
        }
        .maxBy(_._1)
        ._2
    })

    val predictDF = ftsDF
      .withColumn("predict", predictUDF($"features"))

    // Show the rows where the prediction disagrees with the label
    predictDF.where($"class" =!= $"predict").show(truncate = false)
  
    spark.stop()
  }
}
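Running the job prints the rows where the predicted class disagrees with the label: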
+---------------+-----------------+---------------+
|class          |features         |predict        |
+---------------+-----------------+---------------+
|Iris-versicolor|[6.9,3.1,4.9,1.5]|Iris-virginica |
|Iris-versicolor|[5.9,3.2,4.8,1.8]|Iris-virginica |
|Iris-versicolor|[6.7,3.0,5.0,1.7]|Iris-virginica |
|Iris-virginica |[4.9,2.5,4.5,1.7]|Iris-versicolor|
|Iris-virginica |[6.0,2.2,5.0,1.5]|Iris-versicolor|
|Iris-virginica |[6.3,2.8,5.1,1.5]|Iris-versicolor|
+---------------+-----------------+---------------+
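Six of the 150 training rows are misclassified, a resubstitution accuracy of 144/150 = 96%. One caveat with the product form above: with many features, the product of densities can underflow to zero. Since log is monotonic, comparing sums of log-densities is equivalent and numerically stable. A minimal sketch of such an alternative UDF (not in the original code), reusing cprobs and pprobMap from the listing:

    // Hypothetical log-space variant of predictUDF
    val predictLogUDF = udf((vec: DenseVector) => {
      cprobs
        .map { case (params, cl) =>
          val logLik = params.zip(vec.toArray).map {
            case ((mu, sigma2), x) =>
              // log N(x; mu, sigma2), computed directly in log space
              -0.5 * math.log(2 * math.Pi * sigma2) -
                math.pow(x - mu, 2) / (2 * sigma2)
          }.sum
          // add the log prior; classes absent from pprobMap get -Infinity
          val logPrior = math.log(pprobMap.getOrElse(cl, 0.0))
          (logLik + logPrior, cl)
        }
        .maxBy(_._1)
        ._2
    })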
