[Spark 66] Computing the Average of a Dataset and Word-Frequency TopK in Spark

package spark.examples.avg

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._

object SparkAvg {
  def main(args: Array[String]) {
    // Windows-specific Hadoop location; adjust or remove for your environment
    System.setProperty("hadoop.home.dir", "E:\\devsoftware\\hadoop-2.5.2\\hadoop-2.5.2")
    val conf = new SparkConf()
    conf.setAppName("SparkAvg")
    conf.setMaster("local[3]")
    conf.set("spark.shuffle.manager", "sort")
    val sc = new SparkContext(conf)
    val a = sc.parallelize(1 to 9, 3)

    // Reduce each partition to a single (count, sum) pair in one pass
    def func(iter: Iterator[Int]): Iterator[(Int, Int)] = {
      var res = List[(Int, Int)]()
      var count = 0
      var sum = 0
      while (iter.hasNext) {
        count += 1
        sum += iter.next()
      }
      res = (count, sum) :: res
      res.iterator
    }

    // Collect the per-partition pairs and combine them on the driver
    var sum = 0
    var count = 0
    val entries = a.mapPartitions(func).collect()
    for (entry <- entries) {
      count += entry._1
      sum += entry._2
    }
    println("count: " + count + ", sum: " + sum + ", avg: " + sum.toDouble / count)
    sc.stop()
  }
}
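
For comparison, the same result can come from the built-in aggregate operator, which performs the same two-level combination (a per-partition fold followed by a cross-partition merge) without hand-written iterator code. Below is a minimal sketch under the same local setup; the object name SparkAvgAggregate is illustrative:

package spark.examples.avg

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._

object SparkAvgAggregate {
  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setAppName("SparkAvgAggregate").setMaster("local[3]"))
    val a = sc.parallelize(1 to 9, 3)
    // seqOp folds each element into a running (sum, count) inside a partition;
    // combOp merges the per-partition (sum, count) pairs
    val (sum, count) = a.aggregate((0, 0))(
      (acc, v) => (acc._1 + v, acc._2 + 1),
      (x, y) => (x._1 + y._1, x._2 + y._2)
    )
    println("avg: " + sum.toDouble / count)
    // Numeric RDDs also expose mean() directly: a.map(_.toDouble).mean()
    sc.stop()
  }
}

This keeps everything in a single job and avoids collecting per-partition pairs just to loop over them on the driver.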


Word-frequency TopK

package spark.examples.avg

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._

object SparkTopK {
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("SparkTopK").setMaster("local")
    val sc = new SparkContext(conf)

    // Top 4 elements of a plain integer RDD
    val rdd = sc.parallelize(List(100, 32, 67, 17, 7, 71, 38))
    val results = rdd.top(4)
    results.foreach(println)

    // Word-count TopK: count each word, swap to (count, word) so the default
    // tuple ordering compares counts first, then take the 3 largest pairs.
    // (top already selects and sorts the largest elements, so a separate
    // sortByKey beforehand is unnecessary.)
    val words = sc.parallelize(List("This is a book", "That is a desk", "what is That"))
    val results2 = words.flatMap(_.split(" "))
      .map(x => (x, 1))
      .reduceByKey(_ + _)
      .map(x => (x._2, x._1))
      .top(3)
    results2.foreach(println)
    sc.stop()
  }
}
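
The (count, word) swap above exists only so that the default tuple ordering compares counts. As a variant, top also accepts an explicit Ordering, which lets the pairs stay as (word, count). A minimal sketch, with SparkTopKOrdering as an illustrative name:

package spark.examples.avg

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._

object SparkTopKOrdering {
  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setAppName("SparkTopKOrdering").setMaster("local"))
    val words = sc.parallelize(List("This is a book", "That is a desk", "what is That"))
    // Compare (word, count) pairs by their count via an explicit Ordering,
    // so no swap is needed; top returns the pairs in descending count order
    val topWords = words.flatMap(_.split(" "))
      .map(x => (x, 1))
      .reduceByKey(_ + _)
      .top(3)(Ordering.by[(String, Int), Int](_._2))
    topWords.foreach(println)
    sc.stop()
  }
}

The same Ordering works with takeOrdered when the smallest counts are wanted instead.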
