Creating the four dimension tables in HBase (via Phoenix)
create table gmall_base_category3 (id varchar primary key, info.name varchar, info.category2_id varchar) SALT_BUCKETS = 3;

create table gmall_base_trademark (id varchar primary key, info.tm_name varchar) SALT_BUCKETS = 3;

create table gmall_sku_info (id varchar primary key, info.spu_id varchar, info.price varchar, info.sku_name varchar, info.tm_id varchar,
    info.category3_id varchar, info.create_time varchar, info.category3_name varchar, info.spu_name varchar, info.tm_name varchar) SALT_BUCKETS = 3;

create table gmall_spu_info (id varchar primary key, info.spu_name varchar) SALT_BUCKETS = 3;
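These statements are normally executed in the Phoenix shell (sqlline.py). SALT_BUCKETS = 3 pre-splits each table into three salted buckets to spread writes across region servers, and the info. prefix places the columns in the info column family. If you prefer to script the DDL instead, a minimal sketch using Phoenix's JDBC driver (the ZooKeeper quorum below is a placeholder, not from this project) might look like:

import java.sql.DriverManager

object CreateDimTables {
  def main(args: Array[String]): Unit = {
    // Placeholder ZooKeeper quorum; point this at your own HBase cluster.
    val conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")
    val stmt = conn.createStatement()
    // Phoenix DDL runs like any other statement; IF NOT EXISTS keeps it idempotent.
    stmt.executeUpdate(
      """create table if not exists gmall_spu_info (
        |  id varchar primary key,
        |  info.spu_name varchar
        |) SALT_BUCKETS = 3""".stripMargin)
    stmt.close()
    conn.close()
  }
}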
Overall bean structure
Creating OrderDetail
scala\com\atguigu\gmall\realtime\bean\OrderDetail.scala
case class OrderDetail(
  id: Long,
  order_id: Long,
  sku_id: Long,
  order_price: Double,
  sku_num: Long,
  sku_name: String,
  create_time: String,
  var spu_id: Long,
  var tm_id: Long,
  var category3_id: Long,
  var spu_name: String,
  var tm_name: String,
  var category3_name: String
)
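The trailing var fields are dimension attributes that do not exist in the raw order-detail event; they are left mutable so they can be filled in after a lookup against the sku dimension. A hypothetical enrichment helper (mergeSkuInfo is not part of the project code) using the SkuInfo bean defined below:

// Hypothetical helper: copy dimension attributes from a SkuInfo dimension row
// onto an OrderDetail fact record; the ids are stored as strings in HBase.
def mergeSkuInfo(detail: OrderDetail, sku: SkuInfo): OrderDetail = {
  detail.spu_id = sku.spu_id.toLong
  detail.tm_id = sku.tm_id.toLong
  detail.category3_id = sku.category3_id.toLong
  detail.spu_name = sku.spu_name
  detail.tm_name = sku.tm_name
  detail.category3_name = sku.category3_name
  detail
}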
Creating the sku, spu, trademark, and category beans
scala\com\atguigu\gmall\realtime\bean\dim\BaseCategory3.scala
case class BaseCategory3(
  id: String,
  name: String,
  category2_id: String
)
scala\com\atguigu\gmall\realtime\bean\dim\BaseTrademark.scala
case class BaseTrademark(tm_id: String, tm_name: String)
scala\com\atguigu\gmall\realtime\bean\dim\SkuInfo.scala
case class SkuInfo(
  id: String,
  spu_id: String,
  price: String,
  sku_name: String,
  tm_id: String,
  category3_id: String,
  create_time: String,
  var category3_name: String,
  var spu_name: String,
  var tm_name: String
)
scala\com\atguigu\gmall\realtime\bean\dim\SpuInfo.scala
case class SpuInfo(id: String, spu_name: String)
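Each bean's field names match the JSON keys of the corresponding ODS record, so a Kafka message can be deserialized directly with fastjson, exactly as the app below does for BaseCategory3. A quick sketch with a made-up sample payload (the var fields missing from the JSON are simply left null until the dimension join fills them):

import com.alibaba.fastjson.JSON
import com.atguigu.gmall.realtime.bean.dim.SkuInfo

// Made-up sample record in the shape emitted to the ODS topic.
val jsonStr =
  """{"id":"10","spu_id":"3","price":"6999","sku_name":"sample_sku",
    |"tm_id":"5","category3_id":"61","create_time":"2021-06-14 10:00:00"}""".stripMargin
val sku: SkuInfo = JSON.parseObject(jsonStr, classOf[SkuInfo])
// sku.category3_name, sku.spu_name and sku.tm_name are null at this point.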
BaseCategory3App
scala\com\atguigu\gmall\realtime\app\dim\BaseCategory3App.scala
import com.alibaba.fastjson.JSON
import com.atguigu.gmall.realtime.bean.dim.BaseCategory3
import com.atguigu.gmall.realtime.utils.{MyKafkaUtil, OffsetManagerUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
object BaseCategory3App {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("dim_base_category3_app")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val topic = "ODS_T_BASE_CATEGORY3"
    val groupId = "base_category3_group"

    /// Offset handling ///
    val offset: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(groupId, topic)
    var inputDstream: InputDStream[ConsumerRecord[String, String]] = null
    // If the latest saved offset can be read from Redis, load Kafka data starting
    // from it; otherwise read from Kafka's default (latest) position.
    if (offset != null && offset.size > 0) {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, offset, groupId)
    } else {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
    }

    // Capture this batch's offset ranges so they can be committed after processing
    var offsetRanges: Array[OffsetRange] = null
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // Convert each Kafka record's JSON value into a BaseCategory3 bean
    val objectDstream: DStream[BaseCategory3] = inputGetOffsetDstream.map { record =>
      val jsonStr: String = record.value()
      val obj: BaseCategory3 = JSON.parseObject(jsonStr, classOf[BaseCategory3])
      obj
    }

    // Save each batch to HBase through Phoenix
    objectDstream.foreachRDD { rdd =>
      import org.apache.phoenix.spark._
      rdd.saveToPhoenix("GMALL_BASE_CATEGORY3",