UV:
测试数据:
192.168.33.16,hunter,2017-09-16 10:30:20,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.18,polo,2017-09-16 10:30:50,/b
192.168.33.39,nissan,2017-09-16 10:30:53,/b
192.168.33.39,nissan,2017-09-16 10:30:55,/b
192.168.33.39,nissan,2017-09-16 10:30:58,/c
192.168.33.20,ford,2017-09-16 10:30:54,/c
192.168.33.20,ford,2017-09-16 10:30:54,/c
测试结果:
(UV,4)
package com.spark.core
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Computes the unique-visitor (UV) count from a comma-separated access log.
 * Each line looks like: ip,user,timestamp,path — the IP is the first field.
 * Prints one tuple ("UV", n) where n is the number of distinct IPs.
 */
object UV {
  // Needed on Windows so Spark/Hadoop can locate winutils.exe.
  System.setProperty("hadoop.home.dir","D:\\soft\\hadoop\\hadoop-2.9.2")

  def main(args: Array[String]): Unit = {
    // Build SparkConf and SparkContext; local[2] = run locally on 2 threads.
    val conf = new SparkConf().setAppName("UV").setMaster("local[2]")
    val context = new SparkContext(conf)

    // Read the access log, one page view per line.
    val lines = context.textFile("f:/out/access.txt")

    // Extract the IP address (first comma-separated field) from each line.
    val ipAddresses = lines.map(line => line.split(",")(0))

    // De-duplicate the IPs, then tag every distinct IP with ("UV", 1).
    val taggedOnes = ipAddresses.distinct().map(ip => ("UV", 1))

    // Sum the ones under the single "UV" key to get the total.
    val totalUV = taggedOnes.reduceByKey(_ + _)
    totalUV.foreach(println)

    // Optionally persist the result to disk.
    // totalUV.saveAsTextFile("d:\\data\\out")
    context.stop()
  }
}
PV:
测试结果:
(pv,11)
package com.spark.core
import org.apache.spark.{SparkConf, SparkContext}
import scala.util.Random
/**
 * Computes the page-view (PV) count from a comma-separated access log.
 * Every input line is one page view; prints a single tuple ("pv", n).
 */
object PV {
  // Needed on Windows so Spark/Hadoop can locate winutils.exe.
  System.setProperty("hadoop.home.dir","D:\\soft\\hadoop\\hadoop-2.9.2")

  def main(args: Array[String]): Unit = {
    // Create the SparkConf and set the app name.
    // setMaster("local[2]") runs Spark locally; the 2 means two worker threads.
    val sparkConf = new SparkConf().setAppName("PV").setMaster("local[2]")
    // Create the SparkContext.
    val sc = new SparkContext(sparkConf)
    // Read the access log; each line is one page view.
    val file = sc.textFile("f:/out/access.txt")

    // Approach 1: map every line to ("pv", 1) and reduce by key.
    // val pvAndOne = file.map(x=>("pv",1))
    // val totalPV = pvAndOne.reduceByKey(_+_)

    // Approach 2 (tuned): two-phase "salted" aggregation so the single hot
    // key "pv" is first spread across several salted keys (pv0, pv1, ...).
    //
    // BUG FIX: the original code called `file.getNumPartitions` INSIDE the
    // closure passed to `file.map`, referencing the RDD from within its own
    // task. Spark does not allow using an RDD (or SparkContext) inside a
    // transformation — this fails at runtime. Evaluate the partition count
    // once on the driver and capture the plain Int in the closure instead.
    val numPartitions = file.getNumPartitions
    val totalPV1 = file.map(x => (s"pv${Random.nextInt(numPartitions)}",1)).reduceByKey(_ + _)
    // Phase 2: strip the salt, collapse the partial sums back to one "pv" key.
    val totalPV = totalPV1.map(tuple => ("pv", tuple._2)).reduceByKey(_ + _)
    totalPV.foreach(println)
    sc.stop()
  }
}