Requirement: find the users who logged in on at least three consecutive days, together with the length of each streak and its start (and end) date.
Data:
guid01,2018-02-28
guid01,2018-03-01
guid01,2018-03-02
guid01,2018-03-04
guid01,2018-03-05
guid01,2018-03-06
guid01,2018-03-07
guid02,2018-03-01
guid02,2018-03-02
guid02,2018-03-03
guid02,2018-03-06
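The solution below relies on a classic trick: sort each user's login dates, number them 0, 1, 2, ..., and subtract each date's number from the date itself. Dates belonging to one consecutive streak all map to the same "anchor" date, so grouping by (user, anchor) isolates the streaks. A minimal standalone sketch of just that step, using java.time.LocalDate and the first four dates of guid01 from the data above (the object name AnchorTrickDemo is my own, not part of the original code):

import java.time.LocalDate

object AnchorTrickDemo {
  def main(args: Array[String]): Unit = {
    // guid01's first four login dates, already sorted
    val logins = List("2018-02-28", "2018-03-01", "2018-03-02", "2018-03-04")
      .map(s => LocalDate.parse(s))
    logins.zipWithIndex.foreach { case (d, i) =>
      // consecutive dates collapse onto the same anchor date
      println(s"$d minus $i day(s) = ${d.minusDays(i.toLong)}")
    }
  }
}

Running it prints:
2018-02-28 minus 0 day(s) = 2018-02-28
2018-03-01 minus 1 day(s) = 2018-02-28
2018-03-02 minus 2 day(s) = 2018-02-28
2018-03-04 minus 3 day(s) = 2018-03-01   <- gap on 03-03, so a new anchor starts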
Code:
import java.text.SimpleDateFormat
import java.util.Calendar
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object ContinuousLoginDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ContinuousLoginDemo").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    val dates: RDD[String] = sc.textFile("C:\\Users\\LEMMONT\\Desktop\\testdata\\date")

    // (userId, login date)
    val rdd1: RDD[(String, String)] = dates.map(line => {
      val fields = line.split(",")
      (fields(0), fields(1))
    })

    // (userId, (login date, anchor date)): subtracting each date's rank within
    // the user's sorted dates maps all dates of one streak to the same anchor
    val rdd2: RDD[(String, (String, String))] = rdd1.groupByKey().flatMapValues(it => {
      // SimpleDateFormat and Calendar are not thread-safe, so build them per task
      val format = new SimpleDateFormat("yyyy-MM-dd")
      val calendar = Calendar.getInstance()
      val dateSorted: List[String] = it.toSet.toList.sorted // dedupe, then sort
      dateSorted.zipWithIndex.map { case (dateStr, index) =>
        calendar.setTime(format.parse(dateStr))
        calendar.add(Calendar.DAY_OF_YEAR, -index)
        (dateStr, format.format(calendar.getTime))
      }
    })

    // final result: (userId, streak length, start date, end date)
    val res: RDD[(String, Int, String, String)] = rdd2.map(x => {
      ((x._1, x._2._2), x._2._1) // ((userId, anchor date), login date)
    }).groupByKey().mapValues(it => {
      val sorted = it.toList.sorted
      (sorted.size, sorted.head, sorted.last)
    }).filter(x => x._2._1 >= 3) // keep streaks of at least three days
      .map(t => (t._1._1, t._2._1, t._2._2, t._2._3))

    res.collect().foreach(println)
    sc.stop()
  }
}
Output (row order may vary between runs):
(guid01,3,2018-02-28,2018-03-02)
(guid01,4,2018-03-04,2018-03-07)
(guid02,3,2018-03-01,2018-03-03)
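For comparison, the same logic can be written with the DataFrame API and a window function, where row_number() plays the role of the index variable above. This is only a sketch, not the original author's code: the object name and the column names guid and dt are my own, and it assumes a Spark version whose SQL date_sub accepts a column as the day count (used here through expr).

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

object ContinuousLoginSqlDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("ContinuousLoginSqlDemo")
      .master("local[*]")
      .getOrCreate()

    // same two-column CSV as the RDD version
    val df = spark.read.csv("C:\\Users\\LEMMONT\\Desktop\\testdata\\date")
      .toDF("guid", "dt")
      .distinct()

    // rank each user's login dates, then subtract the rank from the date;
    // rows of one streak end up with the same anchor
    val w = Window.partitionBy("guid").orderBy("dt")
    val withAnchor = df
      .withColumn("rn", row_number().over(w))
      .withColumn("anchor", expr("date_sub(dt, rn)"))

    withAnchor.groupBy("guid", "anchor")
      .agg(count("*").as("days"), min("dt").as("start"), max("dt").as("end"))
      .where(col("days") >= 3)
      .select("guid", "days", "start", "end")
      .show()

    spark.stop()
  }
}

A side benefit of the window version: it does not pull every login date of a user into one in-memory collection, which is what groupByKey does in the RDD version.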