各区域热门商品Top3
1、需求分析
这里的热门商品是从点击量的维度来看的,计算各个区域前三大热门商品,并备注上每个商品在主要城市中的分布比例,超过两个城市用其他显示。
例如:
地区 商品名称 点击次数 城市备注
华北 商品A 100000 北京21.2%,天津13.2%,其他65.6%
华北 商品P 80200 北京63.0%,太原10%,其他27.0%
华北 商品M 40000 北京63.0%,太原10%,其他27.0%
东北 商品J 92000 大连28%,辽宁17.0%,其他 55.0%
数据集可以在我的资源中下载获取。
2、思路分析
(1)使用 sql 来完成,碰到复杂的需求,可以使用 udf 或 udaf
(2)查询出来所有的点击记录,并与 city_info 表连接,得到每个城市所在的地区,与 product_info 表连接得到产品名称
(3)按照地区和商品名称分组,统计出每个商品在每个地区的总点击次数
(4)每个地区内按照点击次数降序排列
(5)只取前三名,并把结果保存在数据库中
(6)城市备注需要自定义 UDAF 函数
3、具体实现:
1)导入数据
CREATE TABLE `user_visit_action`(
`date` string,
`user_id` bigint,
`session_id` string,
`page_id` bigint,
`action_time` string,
`search_keyword` string,
`click_category_id` bigint,
`click_product_id` bigint,
`order_category_ids` string,
`order_product_ids` string,
`pay_category_ids` string,
`pay_product_ids` string,
`city_id` bigint)
row format delimited fields terminated by ',' STORED AS TEXTFILE;
load data local inpath '/tmp/user_visit_action1.txt' into table test.user_visit_action;
CREATE TABLE `product_info`(
`product_id` bigint,
`product_name` string,
`extend_info` string)
row format delimited fields terminated by ',' STORED AS TEXTFILE;
load data local inpath '/tmp/product_info1.txt' into table test.product_info;
CREATE TABLE `city_info`(
`city_id` bigint,
`city_name` string,
`area` string)
row format delimited fields terminated by ',' STORED AS TEXTFILE;
load data local inpath '/tmp/city_info1.txt' into table test.city_info;
注意:本次操作平台是基于ambari2.7.3 HDP3.1 hive是3.0 版本 spark2.3。
遇到的问题1,由于HDP3.1 集成了hive 3.0和 spark 2.3,然而spark却读取不了hive表的数据,准确来说是读取不了内表(managed table)的数据。
原因
hive 3.0之后默认开启ACID功能,而且新建的表默认是ACID表。而spark目前还不支持hive的ACID功能,因此无法读取ACID表的数据.
请看:https://issues.apache.org/jira/browse/SPARK-15348
解决办法
修改以下参数让新建的表默认不是acid表。
hive.strict.managed.tables=false
hive.create.as.insert.only=false
metastore.create.as.acid=false
参考自
https://www.cnblogs.com/ppw25/p/11670709.html
遇到的问题2,外部idea 没有权限读取hive表
目前采用了较为简单粗暴的临时办法 hadoop fs -chmod -R 777 /warehouse(注意:放开全部权限仅适合本地实验,生产环境应按需配置权限)
初始代码1
package day09
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
/**
* @author yangkun
* @date 2020/10/31 18:35
* @version 1.0
*/
object Spark03_TopN {
def main(args: Array[String]): Unit = {
//创建配置文件对象
val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL01_MySQL")
//创建SparkSession对象
val spark: SparkSession = SparkSession.builder().enableHiveSupport().config(conf).getOrCreate()
//选择Hive库
spark.sql("use test")
// spark.sql("show tables").show()
//--1.1从用户行为表中,查询所有点击记录,并和city_info,product_info进行连接
spark.sql("select *from product_info").show()
spark.sql(
"""
|select
| c.*,
| p.product_name
|from
| user_visit_action a
|join
| city_info c
|on
| a.city_id = c.city_id
|join
| product_info p
|on
| a.click_product_id = p.product_id
|where
| a.click_product_id != -1
""".stripMargin).createOrReplaceTempView("t1")
//--1.2按照地区和商品的名称进行分组,统计出每个地区每个商品的总点击数
spark.sql(
"""
|select
| t1.area,
| t1.product_name,
| count(*) as product_click_count
|from
| t1
|group by t1.area,t1.product_name
""".stripMargin).show()
spark.stop()
}
}
最终版
package day09
import java.text.DecimalFormat
import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{DataType, LongType, MapType, StringType, StructField, StructType}
/**
* @author yangkun
* @date 2020/10/31 18:35
* @version 1.0
*/
object Spark03_TopN {
def main(args: Array[String]): Unit = {
//创建配置文件对象
val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL01_MySQL")
//创建SparkSession对象
val spark: SparkSession = SparkSession.builder().enableHiveSupport().config(conf).getOrCreate()
//选择Hive库
spark.sql("use test")
//注册自定义函数
spark.udf.register("city_remark",new CityClickUDAF)
//--1.1从用户行为表中,查询所有点击记录,并和city_info,product_info进行连接
// spark.sql("select *from product_info").show()
spark.sql(
"""
|select
| c.*,
| p.product_name
|from
| user_visit_action a
|join
| city_info c
|on
| a.city_id = c.city_id
|join
| product_info p
|on
| a.click_product_id = p.product_id
|where
| a.click_product_id != -1
""".stripMargin).createOrReplaceTempView("t1")
//--1.2按照地区和商品的名称进行分组,统计出每个地区每个商品的总点击数
spark.sql(
"""
|select
| t1.area,
| t1.product_name,
| count(*) as product_click_count,
| city_remark(t1.city_name)
|from
| t1
|group by t1.area,t1.product_name
""".stripMargin).createOrReplaceTempView("t2")
//--1.3针对每个地区,对商品点击数进行降序排序
spark.sql(
"""
|select
| t2.*,
| row_number() over(partition by t2.area order by t2.product_click_count desc) cn
|from
| t2
""".stripMargin).createOrReplaceTempView("t3")
//1.4取当前地区的前3名
spark.sql(
"""
|select
| *
|from
| t3
|where t3.cn <= 3
""".stripMargin).show(false)
//释放资源
spark.stop()
}
}
//自定义一个UDAF聚合函数,完成城市点击量统计
class CityClickUDAF extends UserDefinedAggregateFunction{
//输入数据类型
override def inputSchema: StructType = {
StructType(Array(StructField("city_name",StringType)))
}
//缓存的数据类型 用Map缓存城市以及该城市点击数 :北京->2,天津->3 总的点击量Long: 北京2 + 天津3 = 5
override def bufferSchema: StructType = {
StructType(Array(StructField("city_count",MapType(StringType,LongType)),StructField("total_count",LongType)))
}
//输出的数据类型 北京21.2%,天津13.2%,其他65.6%
override def dataType: DataType = StringType
//稳定性
override def deterministic: Boolean = false
//为缓存数据进行初始化
override def initialize(buffer: MutableAggregationBuffer): Unit = {
buffer(0) = Map[String,Long]()
buffer(1) = 0L
}
//对缓存数据进行更新
override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
//从输入的数据中,获取城市的名称
val cityName: String = input.getString(0)
//从缓存中获取存放城市点击量的Map集合
val map: Map[String, Long] = buffer.getAs[Map[String,Long]](0)
//城市点击量 + 1
buffer(0) = map + (cityName -> (map.getOrElse(cityName,0L) + 1L))
//总点击量 + 1
buffer(1) = buffer.getAs[Long](1) + 1L
}
//分区间的缓存合并
override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
//获取每一个节点城市点击缓存Map
val map1: Map[String, Long] = buffer1.getAs[Map[String,Long]](0)
val map2: Map[String, Long] = buffer2.getAs[Map[String,Long]](0)
//合并两个节点上的城市点击
buffer1(0) = map1.foldLeft(map2){
case (mm,(k,v))=>{
mm + (k->(mm.getOrElse(k,0L) + v))
}
}
//合并两个节点上的总点击数
buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
}
//得到最终的输出效果 北京21.2%,天津13.2%,其他65.6%
override def evaluate(buffer: Row): Any = {
//从缓存中获取数据
val cityCountMap: Map[String, Long] = buffer.getAs[Map[String,Long]](0)
val totalCount: Long = buffer.getAs[Long](1)
//对Map集合中城市点击记录进行降序排序,取前2个
val sortList: List[(String, Long)] = cityCountMap.toList.sortBy(-_._2).take(2)
//计算排名前2的点击率
var citysRatio: List[CityRemark] = sortList.map {
case (cityName, count) => {
CityRemark(cityName, count.toDouble / totalCount)
}
}
//如果城市的个数超过2个,那么其它情况的处理
if(cityCountMap.size > 2){
citysRatio = citysRatio :+ CityRemark("其它",citysRatio.foldLeft(1D)(_ - _.cityRatio))
}
// Array(1,2,3).foldRight()
citysRatio.mkString(",")
}
}
case class CityRemark(cityName: String, cityRatio: Double) {
val formatter = new DecimalFormat("0.00%")
override def toString: String = s"$cityName:${formatter.format(cityRatio)}"
}