5 Data Warehouse Project - Loading the Goods Dimension
Use a zipper table to solve the goods SCD (slowly changing dimension) problem.
5.1 Create the dw-layer table
-- create the dw-layer dimension table
DROP TABLE IF EXISTS `itcast_dw`.`dim_goods`;
CREATE TABLE `itcast_dw`.`dim_goods`(
goodsId bigint,
goodsSn string,
productNo string,
goodsName string,
goodsImg string,
shopId bigint,
goodsType bigint,
marketPrice double,
shopPrice double,
warnStock bigint,
goodsStock bigint,
goodsUnit string,
goodsTips string,
isSale bigint,
isBest bigint,
isHot bigint,
isNew bigint,
isRecom bigint,
goodsCatIdPath string,
goodsCatId bigint,
shopCatId1 bigint,
shopCatId2 bigint,
brandId bigint,
goodsDesc string,
goodsStatus bigint,
saleNum bigint,
saleTime string,
visitNum bigint,
appraiseNum bigint,
isSpec bigint,
gallery string,
goodsSeoKeywords string,
illegalRemarks string,
dataFlag bigint,
createTime string,
isFreeShipping bigint,
goodsSerachKeywords string,
modifyTime string,
dw_start_date string,
dw_end_date string
)
STORED AS PARQUET;
5.2 Detailed steps
Building the zipper table involves the following steps:
- 1. Initial full load
Load all of the ODS data into the zipper history table.
- 2. Incremental load (for a given day, e.g. 2019-09-09)
Load that day's incremental data into an ODS partition,
merge it with the historical data,
and update the matching records through a join query (see the sketch after this list).
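To make step 2 concrete, here is a minimal sketch of the join-based merge. It uses a hypothetical two-column zipper table dim_t and a hypothetical increment table ods_inc rather than the real dim_goods columns, and the '9999-12-31' open-end marker is an assumption for illustration:

-- Hypothetical tables: dim_t(k, v, dw_start_date, dw_end_date) and ods_inc(k, v) holding one day's increment
insert overwrite table dim_t
select
    t.k,
    t.v,
    t.dw_start_date,
    case when t.dw_end_date = '9999-12-31' and i.k is not null
         -- a new version arrived today: close the currently open version
         then date_format(date_sub(current_date(), 1), 'yyyy-MM-dd')
         else t.dw_end_date
    end as dw_end_date
from dim_t t
left join ods_inc i on t.k = i.k
union all
-- append today's rows as the new, still-open version
select
    k,
    v,
    date_format(current_date(), 'yyyy-MM-dd') as dw_start_date,
    '9999-12-31' as dw_end_date
from ods_inc;
-- Note: some Spark versions refuse to overwrite a table that is also read in the same
-- statement; if so, stage the union result in a temporary view or table first.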
1 Full load
- Load all goods created or modified on or before 2019-09-08 into the zipper history table.
Operation steps:
- 1. Use Kettle to extract the data dated 20190908 and earlier into ods
-- Kettle table input: run against the source MySQL business database (not the ods layer)
SELECT *
FROM itcast_goods
WHERE DATE_FORMAT(createtime, '%Y%m%d') <= '20190908'
   OR DATE_FORMAT(modifyTime, '%Y%m%d') <= '20190908';
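Once the Kettle job finishes, a quick row count can confirm that the data landed in the ODS layer. This check is not part of the original steps, and it assumes the ODS goods table is partitioned by a dt column:

-- run in spark-sql; the dt partition column is an assumption
select count(*) from `itcast_ods`.`itcast_goods` where dt = '20190908';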
- 2. Use Spark SQL to load the full data into the dw-layer dimension table
set spark.sql.shuffle.partitions=1; -- number of partitions used during shuffle; the default is 200
-- load the full data into the dw-layer dimension table with Spark SQL
insert overwrite table `itcast_dw`.`dim_goods`
select
goodsId,
goodsSn,
productNo,
goodsName,
goodsImg,
shopId,
goodsType,
marketPrice,
shopPrice,
warnStock,
goodsStock,
goodsUnit,
goodsTips,
isSale,
isBest,
isHot,
isNew,
isRecom,
goodsCatIdPath,
goodsCatId,
shopCatId1,
shopCatId2,
brandId,
goodsDesc,
goodsStatus,
saleNum,
saleTime,
visitNum,
appraiseNum,
isSpec,
gallery,
goodsSeoKeywords,
illegalRemarks,
dataFlag,
createTime,
isFreeShipping,
goodsSerachKeywords,
modifyTime,
case when modifyTime is not null
    then from_unixtime(unix_timestamp(modifyTime, 'yyyy-MM-dd HH:mm:ss'), 'yyyy-MM-dd')
    else from_unixtime(unix_timestamp(createTime, 'yyyy-MM-dd HH:mm:ss'), 'yyyy-MM-dd')
end as dw_start_date,
'9999-12-31' as dw_end_date      -- open-end marker for the current version
from `itcast_ods`.`itcast_goods`
where dt = '20190908';           -- assumes the ODS table is partitioned by dt
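After the full load, a spot check of the generated zipper columns can verify the result (a hypothetical check, not part of the original steps; it assumes the load above set dw_end_date to '9999-12-31'):

-- every row of the initial full load should carry the open-end marker
select goodsId, dw_start_date, dw_end_date
from `itcast_dw`.`dim_goods`
limit 10;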