Naive Bayes Classifier

Tweet sentiment classification based on TDP sentiment analysis, implemented on Spark.

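Each line of the input file (ows.json) is assumed to be a standalone JSON object containing at least a "text" field, since parseJson below only reads r("text"). A made-up example of one input line:

{"text": "Marching downtown with everyone right now :) #ows"}
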
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd._
import scala.util.parsing.json._
import java.util.StringTokenizer

import scala.collection.mutable.HashMap




object SimpleApp{
    def main(args: Array[String]){
        var totalWordNumber = 0
        var happyProb = 0.0
        var sadProb = 0.0

        //var classiferPara: (Int, Double, Double) = (0,0.0,0.0)

        //val logFile = "/home/hrl/spark-0.9.1/README.md"
        val sc = new SparkContext("local", "Simple App", "/home/hrl/spark-0.9.1", List("target/scala-2.10/simple-project_2.10-1.0.jar"))
        //val logData = sc.textFile(logFile, 2).cache()
        val owsFile = "/home/hrl/spark-0.9.1/apps/sentimenAnalysis/data/ows.json"
        //val logData = sc.textFile(owsFile)
        val owsData = sc.textFile(owsFile)

        val twittText = owsData.map(parseJson _)
        val twittTextWithSent = twittText.map(parseSentiment _)
        //twittTextWithSent.collect()

        //(twitt, (1,0))
        val twittWord = twittTextWithSent.map(mapSentWord _)
        //twittWord.collect

        //twittWordGroup is an instance of HashMap[String, (Int, Int)], not an RDD
        val twittWordGroup = groupWordNumber(twittWord)
        //twittWordGroup.collect
        
        //totalWordNumber = countTotalWord(twittWordGroup)
        //(totalWordNumber, happyProb, sadProb)
        val classiferPara:(Int, Double, Double) = classiferParameters(twittWordGroup)
        
        val twitt = "thing NYC could do to #Occupy is what they are doing right now. Suppression always  always has the opposite effect"

        val classProbs = classify(twitt, twittWordGroup, classiferPara)
        println("job done")
        //val numAs = logData.filter(line => line.contains("a")).count()
        //val numBs = logData.filter(line => line.contains("b")).count()
        //println("Lines with a: %s, Lines with b: %s".format(numAs, numBs))
    }

    // extract the "text" field from one JSON line; returns "failed" when parsing does not succeed
    def parseJson(line: String): String = {
        val result = JSON.parseFull(line)
        result match{
            case Some(e) => {
                val r = e.asInstanceOf[Map[String, String]]
                r("text")
            }
            case None => "failed"
        }
    }
    
    // attach a sentiment label to the tweet text: 1 = happy, 0 = undecided, -1 = sad
    def parseSentiment(twittText: String): (String, Int) = {
      val HAPPY_SMILEYS: Array[String] = Array[String](":)", ";)", ":D", ":-)", ":o)", ":-D")
      val SAD_SMILEYS:Array[String] = Array[String](":(", ":-(", ":'(", ":'-(", "D:")
    
      val tokens = new StringTokenizer(twittText)
      var found = false
      var res= 0
      while(tokens.hasMoreTokens() && !found){
        val token: String = tokens.nextToken()
        if(HAPPY_SMILEYS.contains(token)){
          found = true
          res = 1
        }
        else if(SAD_SMILEYS.contains(token)){
          found = true
          res = -1
        }
      }
      (twittText, res)
    }

    // turn a labeled tweet (twittText, sentiment) into a per-tweet HashMap[word, (happyCount, sadCount)]
    /*
    def mapSentWord(sentTwitt: (String, Int)): (String,(Int, Int))={
        sentTwitt match {
            case (twitt: String, 1) =>{
                (twitt, (1, 0))
            }
            case (twitt: String, -1) =>{
                (twitt, (0, 1))
            }
            case (twitt: String, 0) =>{
                (twitt, (0, 0))
            }
        }
    }
    */
    /*
    def mapSentWord(sentTwitt: (String, Int)): Array[(String,(Int, Int))]={
        val tokens = new StringTokenizer(sentTwitt._1)
        var result:Array[(String,(Int,Int))] = Array()
        sentTwitt match {
            case (twitt: String, 1) =>{
                 while(tokens.hasMoreTokens()){
                   result = result ++ Array((tokens.nextToken(),(1,0)))
                 }
            }
            case (twitt: String, -1) =>{
                 while(tokens.hasMoreTokens()){
                   result = result ++ Array((tokens.nextToken(),(0,1)))
                 }
            }
            case (twitt: String, 0) =>{
                 while(tokens.hasMoreTokens()){
                   result = result ++ Array((tokens.nextToken(),(0,0)))
                 }
            }
        }
        result
    }
    */

    // first element of the pair counts the word in happy tweets, the second in sad tweets
    def mapSentWord(sentTwitt: (String, Int)): HashMap[String,(Int, Int)]={
        val tokens = new StringTokenizer(sentTwitt._1)
        val result:HashMap[String,(Int,Int)] = HashMap()
        sentTwitt match {
            case (twitt: String, 1) =>{
                 while(tokens.hasMoreTokens()){
                   val token = tokens.nextToken();
                   if(result.contains(token)){
                     result(token) = (result(token)._1+1, result(token)._2)
                   }else{
                     //add new key,value to the result hashmap
                     result += (token -> (1,0))
                   }
                   //result = result ++ HashMap((tokens.nextToken(),(1,0)))
                 }
            }
            case (twitt: String, -1) =>{
                 while(tokens.hasMoreTokens()){
                   val token = tokens.nextToken();
                   if(result.contains(token)){
                     result(token) = (result(token)._1, result(token)._2 + 1)
                   }else{
                     //add new key,value to the result hashmap
                     result += (token -> (0,1))
                   }
                   //result = result ++ HashMap((tokens.nextToken(),(1,0)))
                 }
            }
            case (twitt: String, 0) =>{
                 while(tokens.hasMoreTokens()){
                   val token = tokens.nextToken();
                   if(result.contains(token)){
                     result(token) = (result(token)._1, result(token)._2)
                   }else{
                     //add new key,value to the result hashmap
                     result += (token -> (0,0))
                   }
                   //result = result ++ HashMap((tokens.nextToken(),(1,0)))
                 }
            }
        }
        result
    }
    
    // merge the counts of map a into map b; entries still at (0,0) (from undecided tweets) are not carried over
    def addMap(a: HashMap[String,(Int, Int)], b: HashMap[String,(Int, Int)]): HashMap[String,(Int, Int)] = {
      
      a.foreach{
        case (key: String,value) =>
          if(b.contains(key)){
            b(key) =((b(key)._1 + a(key)._1), (b(key)._2 + a(key)._2))
          }
          else{
            if(!(value == (0,0)))
                b += (key -> value)
          }
      }
      b
    }

    // merge all per-tweet word-count maps into one global HashMap[word, (happyCount, sadCount)]
    def groupWordNumber(sentWordRDD: RDD[HashMap[String, (Int,Int)]]): HashMap[String, (Int,Int)] = {
        sentWordRDD.reduce((x,y) => addMap(x,y))
    }

    def countTotalWord(twittWordGroup: HashMap[String,(Int,Int)]): Int = {
        var totalWord = 0
        twittWordGroup.foreach{
            case(key, value)=> totalWord += value._1 + value._2
        }
      totalWord
    }

    // estimate (totalWordNumber, P(happy), P(sad)) from the grouped word counts
    def classiferParameters(twittWordGroup: HashMap[String, (Int, Int)]): (Int, Double, Double) = {
      var happyNum = 0
      var sadNum = 0
      var totalWordNumber = 0
      twittWordGroup.foreach{
        case(key, value)=> {
          happyNum += value._1
          sadNum += value._2
          totalWordNumber += value._1 + value._2
        }
      }
      (totalWordNumber, happyNum.toDouble/totalWordNumber, sadNum.toDouble/totalWordNumber)
    }

    // score a tweet against both classes; returns (happy score, sad score)
    def classify(twitt: String, twittWordGroup: HashMap[String, (Int, Int)], paras: (Int, Double, Double)): (Double, Double) = {
        val tokens = new StringTokenizer(twitt)
        var firstProb = 1.0
        var secProb = 1.0
        var foundClass1 = false
        var foundClass2 = false
        while(tokens.hasMoreTokens()){
          
          val token = tokens.nextToken();
          if(twittWordGroup.contains(token)){
            val pWord1 = (twittWordGroup(token)._1 + twittWordGroup(token)._2).toDouble / paras._1
            val pClass1 = paras._2
            if(twittWordGroup(token)._1 > 0){
                foundClass1 = true
                val pWordGivenClass1 = twittWordGroup(token)._1.toDouble /(twittWordGroup(token)._1+ twittWordGroup(token)._2)
                firstProb *=  pWordGivenClass1 * pClass1 / pWord1
            }
            val pWord2 = (twittWordGroup(token)._1 + twittWordGroup(token)._2).toDouble / paras._1
            val pClass2 = paras._3
            if(twittWordGroup(token)._2 > 0){
                foundClass2 = true
                val pWordGivenClass2 = twittWordGroup(token)._2.toDouble /(twittWordGroup(token)._1+ twittWordGroup(token)._2)
                secProb *= pWordGivenClass2 * pClass2 / pWord2
            }
          }else{
            // token not seen in the training data, skip it
          }
        }
        if(!foundClass1) firstProb = 0.0
        if(!foundClass2) secProb = 0.0
        (firstProb, secProb)
    }

}
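
As a quick check, classify can be exercised without Spark once a word-count map is at hand: for every token that appears in the map, the happy score is multiplied by P(word|happy) * P(happy) / P(word), and the sad score analogously; the larger product wins. A minimal sketch, assuming the SimpleApp object above is compiled on the same classpath and using made-up word counts:

import scala.collection.mutable.HashMap

object ClassifyDemo {
    def main(args: Array[String]) {
        // hypothetical counts: word -> (occurrences in happy tweets, occurrences in sad tweets)
        val wordGroup: HashMap[String, (Int, Int)] = HashMap(
            "love" -> (5, 1),
            "suppression" -> (0, 4),
            "occupy" -> (2, 2))
        // (totalWordNumber, P(happy), P(sad)) estimated from the counts above
        val paras = SimpleApp.classiferParameters(wordGroup)
        // (happy score, sad score) for the sample tweet
        val scores = SimpleApp.classify("i love occupy", wordGroup, paras)
        println("happy score: " + scores._1 + ", sad score: " + scores._2)
    }
}

With these made-up counts the happy score comes out larger, so the sample tweet would be labeled happy.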
