hive
select * from my_table where dt = ${hivevar:dt}
#!/bin/bash
# Re-create one day's partition on ods.odstable.
# Usage: ./script.sh [yyyymmdd]   (defaults to yesterday's date)

# Default partition date: yesterday (GNU date), overridable by $1.
yesterday=$(date -d "-1 day" +%Y%m%d)
if [ -z "$1" ]
then
echo "没有传入参数"
else
yesterday="$1"
echo "yesterday=$yesterday"
fi

# NOTE: the dynamic-partition settings must run inside the Hive session,
# so they belong in the SQL string. As bare bash statements ("set key=val")
# they would only have overwritten the shell's positional parameters and
# never reached Hive.
v_sql="set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
alter table ods.odstable drop if exists partition (ds=$yesterday);
alter table ods.odstable add if not exists partition (ds=$yesterday)"

# Quote the expansion so the multi-line SQL is echoed verbatim.
echo "$v_sql"
hive -e "$v_sql;"
-- Load a single day's partition of ODS user data into the DWD layer.
-- ${hivevar:bizdate} is supplied by the caller (hive --hivevar bizdate=...).
INSERT OVERWRITE TABLE dwd.dwd_user_builds PARTITION (ds = '${hivevar:bizdate}')
SELECT *
FROM ods.ods_usr_user
WHERE ds = '${hivevar:bizdate}'
spark-sql
spark-sql -d dt=2020 -f test.sql
select * from my_table where dt = ${dt}
datax
python /bigdata/binfile/datax/bin/datax.py -p"-Dtable=ods_fin_bill_item_detail -Dyesterday='ds='$yesterday" test.json
{
"job": {
"content": [
{
"reader": {
"name": "hdfsreader",
"parameter": {
"column": [{"index":0,"type":"long"},{"index":1,"type":"string"},
{"index":2,"type":"string"},
{"index":3,"type":"long"},
{"index":4,"type":"long"},
{"index":5,"type":"date"},
{"index":6,"type":"date"} ],
"defaultFS": "hdfs://master:21020",
"encoding": "UTF-8",
"fieldDelimiter": "\t",
"fileType": "orc",
"path": "/user/hive/warehouse/dwd.db/tablename/${yesterday}"
}
},
"writer": {
"name": "mysqlwriter",
"parameter": {
"column": ["*"],
"connection": [
{ "jdbcUrl": "jdbc:mysql://xxxx:3306/user_system?useUnicode=true&characterEncoding=utf8",
"table": ["xxx"]
}
],
"password": "xxx",
"preSql": ["truncate table xxx"],
"session": [],
"username": "xxx",
"writeMode": "insert"
}
}
}
],
"setting": {
"speed": {
"channel": "1"
}
}
}
}
博主公众号
求关注