Spark(day07) -- SparkStreaming

1. Introduction

http://spark.apache.org/docs/latest/streaming-programming-guide.html

Important jars:

commons-lang3-3.3.2.jar

scala-library-2.10.5.jar

spark-streaming-flume-sink_2.10-1.6.1.jar

spark-streaming-kafka_2.10-1.6.1.jar
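
These jars correspond to Scala 2.10 and Spark 1.6.1. If the project is built with sbt instead of dropping jars on the classpath, the equivalent dependencies might look like the build.sbt sketch below (versions are taken from the jar names above; spark-streaming-flume is added as well because FlumeUtils lives in that artifact, and the repository setup is assumed to be the default one).

// build.sbt sketch -- versions match the jars listed above
scalaVersion := "2.10.5"

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-streaming"            % "1.6.1",
  "org.apache.spark" %% "spark-streaming-kafka"      % "1.6.1",
  "org.apache.spark" %% "spark-streaming-flume"      % "1.6.1",
  "org.apache.spark" %% "spark-streaming-flume-sink" % "1.6.1"
)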


2. Code

StreamingWC:

package SparkStreaming

import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object StreamingWC {
  def main(args: Array[String]): Unit = {
    val conf=new SparkConf().setAppName("StreamingWC").setMaster("local[2]")
    val sc=new SparkContext(conf)
    //设置每三秒切分一次RDD
    val ssc=new StreamingContext(sc,Seconds(3))

    //创建Dstream接受数据
    val ds=ssc.socketTextStream("192.168.16.100",1111)

   val res= ds.flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_)
    res.print()

    ssc.start()
    ssc.awaitTermination()

  }
}
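
The socket source above only needs something writing newline-terminated text to 192.168.16.100:1111 (on Linux, nc -lk 1111 is the usual way to feed it by hand). For automated testing, a minimal stand-in data source might look like the sketch below; the object name, port, and word list are assumptions, not part of the original example.

import java.io.PrintWriter
import java.net.ServerSocket

// Minimal test data source: accepts one client (the Spark receiver) and
// writes a random line of words every second.
object WordServer {
  def main(args: Array[String]): Unit = {
    val server = new ServerSocket(1111)          // same port as socketTextStream above
    val socket = server.accept()                 // blocks until the receiver connects
    val out    = new PrintWriter(socket.getOutputStream, true)
    val words  = Array("hello", "tom", "jerry")
    while (true) {
      out.println(words(scala.util.Random.nextInt(words.length)) + " hello")
      Thread.sleep(1000)
    }
  }
}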

AccWC:

package SparkStreaming

import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

object AccWC {
  // String      -- the word (the key)
  // Seq[Int]    -- counts for that word in the current batch, e.g. (hello,1),(hello,1),(tom,1) arrives as (hello, Seq(1,1)), (tom, Seq(1))
  // Option[Int] -- the accumulated count from previous batches
  val updataFunc = (iter: Iterator[(String, Seq[Int], Option[Int])]) => {
    // equivalent pattern-match form:
    // iter.flatMap { case (word, counts, prev) => Some(counts.sum + prev.getOrElse(0)).map(total => (word, total)) }
    iter.flatMap(it => Some(it._2.sum + it._3.getOrElse(0)).map(x => (it._1, x)))
  }

  def main(args: Array[String]): Unit = {
    LoggerLevels.setStreamingLogLevels()
    val conf = new SparkConf().setAppName("AccWC").setMaster("local[2]")
    val sc = new SparkContext(conf)
    // updateStateByKey needs a checkpoint directory to persist the running state
    sc.setCheckpointDir("c://ck11111111111111110")
    val ssc = new StreamingContext(sc, Seconds(5))

    val ds = ssc.socketTextStream("192.168.16.100", 8888)
    val res = ds.flatMap(_.split(" ")).map((_, 1))
      .updateStateByKey(updataFunc, new HashPartitioner(sc.defaultParallelism), true)
    res.print()
    ssc.start()
    ssc.awaitTermination()



  }
}
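
updateStateByKey hands updataFunc one entry per key: the word, the Seq of counts for that word in the current batch, and the Option holding the previous total. The tiny REPL-style sketch below (plain Scala, made-up values) shows the transition for one batch.

// "hello" appears twice in this batch and had a previous total of 3;
// "tom" appears once and has no previous state yet.
val iter = Iterator(("hello", Seq(1, 1), Some(3)), ("tom", Seq(1), None))
val updated = iter.flatMap(it => Some(it._2.sum + it._3.getOrElse(0)).map(x => (it._1, x)))
println(updated.toList)   // List((hello,5), (tom,1))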

KafkaWC:

package SparkStreaming

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

object KafkaWC {
  // same update function as in AccWC: add this batch's counts to the previous total for each key
  val updataFunc = (iter: Iterator[(String, Seq[Int], Option[Int])]) => {
    iter.flatMap(it => Some(it._2.sum + it._3.getOrElse(0)).map(x => (it._1, x)))
  }

  def main(args: Array[String]): Unit = {
    LoggerLevels.setStreamingLogLevels()
    val Array(zkQuorum, group, topics, numThreads) = args
    val conf = new SparkConf().setAppName("KafkaWC").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setCheckpointDir("c://115")
    val ssc = new StreamingContext(sc, Seconds(3))
    val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap
    val ds = KafkaUtils.createStream(ssc, zkQuorum, group, topicMap, StorageLevel.MEMORY_ONLY)
    // each element is (key, message); the key is usually null, so keep only the message
    val words = ds.map(_._2).flatMap(_.split(" "))
    val res = words.map((_, 1))
      .updateStateByKey(updataFunc, new HashPartitioner(sc.defaultParallelism), true)
    res.print()
    ssc.start()
    ssc.awaitTermination()


  }
}
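
KafkaWC expects four command-line arguments: the ZooKeeper quorum, a consumer group id, a comma-separated topic list, and the number of receiver threads per topic. With hypothetical values (the address, group, and topic names below are made up), the topicMap built by the code above looks like this:

// hypothetical arguments: zkQuorum, group, topics, numThreads
val Array(zkQuorum, group, topics, numThreads) =
  Array("192.168.16.100:2181", "wc-group", "wordcount,clicks", "2")
val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap
// topicMap == Map("wordcount" -> 2, "clicks" -> 2)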

FlumePush:

package SparkStreaming

import org.apache.spark.streaming.flume.FlumeUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object FlumePush {
  def main(args: Array[String]): Unit = {
    LoggerLevels.setStreamingLogLevels()
    val conf=new SparkConf().setAppName("FlumePush").setMaster("local[2]")
    val sc=new SparkContext(conf)
    val ssc=new StreamingContext(sc,Seconds(3))

    // push mode: the Flume agent's avro sink pushes events to a receiver that
    // Spark Streaming starts on this host and port
    val flumeDS = FlumeUtils.createStream(ssc, "192.168.16.66", 1111)
    // the actual payload of a Flume event is obtained via event.getBody
    val words = flumeDS.flatMap(x => new String(x.event.getBody.array()).split(" ").map((_, 1)))
    val result = words.reduceByKey(_ + _)
    result.print()
    ssc.start()
    ssc.awaitTermination()


  }
}
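
The payload lives in event.getBody, but the underlying Avro event also carries the Flume headers. A sketch of reading them, reusing flumeDS from FlumePush above, is below; it assumes the agent sets a "host" header, which is an assumption and not something the original example guarantees.

import scala.collection.JavaConverters._

// Pair each line with the value of a (hypothetical) "host" header set by the agent.
val tagged = flumeDS.map { sfe =>
  val body    = new String(sfe.event.getBody.array())
  val headers = sfe.event.getHeaders.asScala.map { case (k, v) => (k.toString, v.toString) }
  (headers.getOrElse("host", "unknown"), body)
}
tagged.print()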

FlumePoll:

package SparkStreaming

import java.net.InetSocketAddress

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.flume.FlumeUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object FlumePoll {
  def main(args: Array[String]): Unit = {
    LoggerLevels.setStreamingLogLevels()
    val conf=new SparkConf().setAppName("FlumePoll").setMaster("local[2]")
    val sc=new SparkContext(conf)
    val ssc=new StreamingContext(sc,Seconds(3))

    // poll mode: Spark pulls events from the Flume agent running the spark sink at this address
    val address = Seq(new InetSocketAddress("192.168.16.100", 1111))
    val flumeds = FlumeUtils.createPollingStream(ssc, address, StorageLevel.MEMORY_ONLY)
    val tup = flumeds.flatMap(x => new String(x.event.getBody().array()).split(" ").map((_, 1)))
    val res = tup.reduceByKey(_ + _)
    res.print()
    ssc.start()
    ssc.awaitTermination()

  }
}
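
createPollingStream takes a Seq of addresses, so pulling from several Flume agents only means listing more of them. A sketch, reusing ssc from FlumePoll above (the second host below is hypothetical):

val addresses = Seq(
  new InetSocketAddress("192.168.16.100", 1111),
  new InetSocketAddress("192.168.16.101", 1111)   // hypothetical second agent
)
val multiAgentDS = FlumeUtils.createPollingStream(ssc, addresses, StorageLevel.MEMORY_ONLY)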

BlackListFilter:

package SparkStreaming

import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
// Requirement: users click ads on the site and every click is billed in real time,
// but click fraud has to be filtered out: any click from a user on the blacklist is dropped.

// 1. Open port 8888 on the Linux box
// 2. Type the following log lines into that session:
// 0323 jay
// 0323 jj
// 0323 tom
// 3. The test passes if the console does not print jj

object BlackListFilter extends App{
  LoggerLevels.setStreamingLogLevels()
  val conf=new SparkConf().setAppName("BlackListFilter").setMaster("local[2]")
  val sc=new SparkContext(conf)
  // cut a new RDD (batch) every 3 seconds
  val ssc = new StreamingContext(sc, Seconds(3))
  // mock a blacklist; true means the entry is active, false means it is disabled
  val blackList = Array(("jj", true), ("jay", true))
  val blackRDD = sc.parallelize(blackList)

  // listen on port 8888
  val log = ssc.socketTextStream("192.168.16.100", 8888)
  // each ad-click record is a line "time name"; map it to (name, line)
  val ds = log.map { x => (x.split(" ")(1), x) }
  // transform runs an arbitrary RDD-to-RDD operation on every batch of the DStream,
  // which is what lets us join each batch against another dataset (the blacklist RDD)
  val endDs = ds.transform(my => {
    // leftOuterJoin keeps every click on the left and tells us whether that user is blacklisted
    val joinRDD = my.leftOuterJoin(blackRDD)
    val endRDDs = joinRDD.filter(tuple => {
      // after the join each element is (name, (line, Option[Boolean]));
      // if the Option holds true the user is on the blacklist, so drop the record (return false),
      // otherwise it is a valid click and we keep it
      if (tuple._2._2.getOrElse(false)) {
        false
      } else {
        true
      }
    })
    endRDDs.map(_._2._1)
  })
  endDs.print()
  ssc.start()
  ssc.awaitTermination()
}
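
The transform / leftOuterJoin / filter chain is easiest to see on ordinary RDDs. The standalone sketch below replays it on the sample log lines from the comments above; only "0323 tom" survives because jj and jay are blacklisted. The object and app names in the sketch are made up.

import org.apache.spark.{SparkConf, SparkContext}

object BlackListFilterSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("BlackListFilterSketch").setMaster("local[2]"))
    // one "batch" of click logs, mapped to (name, whole line) as in BlackListFilter
    val clicks = sc.parallelize(Seq("0323 jay", "0323 jj", "0323 tom")).map(x => (x.split(" ")(1), x))
    val blackRDD = sc.parallelize(Seq(("jj", true), ("jay", true)))
    val kept = clicks.leftOuterJoin(blackRDD)          // (name, (line, Option[Boolean]))
      .filter(t => !t._2._2.getOrElse(false))          // drop blacklisted users
      .map(_._2._1)
    kept.collect().foreach(println)                    // prints only 0323 tom
    sc.stop()
  }
}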

WindowFunc:

package SparkStreaming


import org.apache.log4j.{Level, Logger}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{Logging, SparkConf, SparkContext}

object WindowFunc {
  def main(args: Array[String]): Unit = {
    LoggerLevels.setStreamingLogLevels()
    val conf = new SparkConf().setAppName("WindowFunc").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc,Seconds(5))

    val ds = ssc.socketTextStream("192.168.16.100", 7777)
    val tuple = ds.flatMap(_.split(" ")).map((_, 1))
    // window length 15s, slide interval 10s; both must be multiples of the 5s batch interval
    val winWC = tuple.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(15), Seconds(10))
    winWC.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
object LoggerLevels extends Logging {

  def setStreamingLogLevels() {
    val log4jInitialized = Logger.getRootLogger.getAllAppenders.hasMoreElements
    if (!log4jInitialized) {
      logInfo("Setting log level to [WARN] for streaming example." +
        " To override add a custom log4j.properties to the classpath.")
      Logger.getRootLogger.setLevel(Level.WARN)
    }
  }
}
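
For long windows there is also an incremental overload of reduceByKeyAndWindow that takes an inverse function and only processes the batches entering and leaving the window instead of re-reducing the whole window; it requires checkpointing. A sketch, reusing ssc and tuple from WindowFunc above (the checkpoint path is hypothetical):

ssc.checkpoint("c://ck_window")          // required by the incremental form
val winWC2 = tuple.reduceByKeyAndWindow(
  (a: Int, b: Int) => a + b,             // add counts from batches entering the window
  (a: Int, b: Int) => a - b,             // subtract counts from batches leaving the window
  Seconds(15),                           // window length
  Seconds(10)                            // slide interval
)
winWC2.print()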
