Data:
import org.apache.spark.sql.SparkSession

object words1 {
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder().appName("words").master("local[*]").getOrCreate()
    import session.implicits._

    // Each input line is tab-separated.
    val words = session.read.textFile("D:\\数据\\as.txt")
    val key_word = words.map { line =>
      val field = line.split("\t")
      val keyword = field(2)
      val url = field(4)
      // field(3) is itself space-separated; clickorder is its second part.
      // Some records are missing that part, so default them to 0 instead of throwing.
      val parts = field(3).split(" ")
      val clickorder: Long = if (parts.length > 1) parts(1).toLong else 0L
      Data(keyword, url, clickorder)
    }.toDF() // mapping a Dataset[String] already yields a Dataset[Data]; no .rdd round-trip needed

    key_word.createTempView("u_table")
    val c = session.sql(
      "select keyword, sum(clickorder) con, count(keyword) cnt from u_table" +
        " group by keyword, url order by con desc limit 20")
    c.show(20)
    session.stop()
  }
}

case class Data(keyword: String, url: String, clickorder: Long)
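For reference, the same top-20 aggregation can be written with the DataFrame API instead of SQL over a temp view. This is a minimal sketch assuming the key_word DataFrame built above is in scope; the aliases con and cnt mirror the SQL column names.

import org.apache.spark.sql.functions.{count, desc, sum}

// DataFrame-API equivalent of the SQL query above (same grouping and ordering).
val top20 = key_word
  .groupBy("keyword", "url")
  .agg(sum("clickorder").as("con"), count("keyword").as("cnt"))
  .select("keyword", "con", "cnt") // mirror the SQL projection
  .orderBy(desc("con"))
  .limit(20)
top20.show(20)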
The fourth field of each record is itself split by spaces, and in some records part of it is missing, so the if expression used to compute clickorder guards against those records (falling back to 0 instead of failing).
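To make that guard concrete, it can be exercised on its own. The sample inputs below are hypothetical and only follow the layout described above (field 4 space-separated, with the second part sometimes missing):

// Standalone sketch of the clickorder guard (hypothetical inputs).
object ParseSketch {
  def clickOrder(field3: String): Long = {
    val parts = field3.split(" ")
    if (parts.length > 1) parts(1).toLong else 0L // second part missing -> 0
  }

  def main(args: Array[String]): Unit = {
    println(clickOrder("2 5")) // both parts present -> 5
    println(clickOrder("2"))   // second part missing -> 0
  }
}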