#!/bin/bash
# Extract the year and month from the first argument (expected format: YYYYMM)
year=${1:0:4}
month=${1:4:2}
# Validate the argument: six digits, month between 01 and 12.
# 10# forces base 10 so leading-zero months (08, 09) are not parsed as invalid octal.
if ! [[ $1 =~ ^[0-9]{6}$ ]] || (( 10#$year < 1 || 10#$month < 1 || 10#$month > 12 )); then
    echo "Invalid argument: expected a month in YYYYMM format" >&2
    exit 1
fi
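# Example (hypothetical input): $1=202312 gives year=2023, month=12;
# a bare 2023 or 202313 would be rejected by the check above.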
# Get the number of days in the month: awk keeps the last field of every
# non-empty line of cal's output, so END prints the month's final day number
days=$(cal "$month" "$year" | awk 'NF {DAYS = $NF}; END {print DAYS}')
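# An equivalent without cal (assumption: GNU date is available):
# days=$(date -d "${year}-${month}-01 +1 month -1 day" +%d)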
base_dir=/appdata/jiayeli/riverMaps_btch
# Run the Spark job for every day of the month for one table, sequentially
executorMonthDate() {
    local tbName=$1
    for ((day = 1; day <= days; day++)); do
        # 10# again avoids printf rejecting a leading-zero month as octal
        partitionDate=$(printf '%04d%02d%02d' "$((10#$year))" "$((10#$month))" "$day")
        printf '== --------------------------------------------------------------------------------\n'
        printf '%s: start job.\n' "$(date +'%Y-%m-%d %H:%M:%S:%3N')"
        printf 'processing partition date: %s\n' "$partitionDate"
        printf 'sh %s/bin/run.sh %s dynamic %s spark.maxExecutors=15 spark.executor.memory=40G\n' "$base_dir" "$tbName" "$partitionDate"
        sh "${base_dir}/bin/run.sh" "$tbName" dynamic "$partitionDate" spark.maxExecutors=15 spark.executor.memory=40G
        printf '%s: job end.\n' "$(date +'%Y-%m-%d %H:%M:%S:%3N')"
        printf '== --------------------------------------------------------------------------------\n'
    done
}
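# Example (hypothetical): run a single table in the foreground instead of the
# parallel launches below:
#   executorMonthDate dwd_hma_one_id_df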
# Target tables, one background job and one log file per table below
hmaOneId="dwd_hma_one_id_df"
hmaVisitOneId="dwd_hma_visit_one_id_df"
hwaOneId="dwd_hwa_one_id_df"
hwaVisitOneId="dwd_hwa_visit_one_id_df"
hmaSessionIdOneId=""   # placeholder, not launched below
# Launch all four tables in parallel; create the log directory first so the
# redirections cannot fail
mkdir -p ../logs
executorMonthDate "${hmaOneId}"      > "../logs/${hmaOneId}.log"      2>&1 &
executorMonthDate "${hmaVisitOneId}" > "../logs/${hmaVisitOneId}.log" 2>&1 &
executorMonthDate "${hwaOneId}"      > "../logs/${hwaOneId}.log"      2>&1 &
executorMonthDate "${hwaVisitOneId}" > "../logs/${hwaVisitOneId}.log" 2>&1 &
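# Block until all four background jobs finish, so the script exits only when
# the whole month has been processed (assumption: a synchronous overall run is
# wanted; drop this line to fire-and-forget)
wait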
Shell: for a specified month, batch-run the daily Spark job that inserts into each date partition.
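A minimal usage sketch (the script name run_month.sh is hypothetical; pass the target month as YYYYMM and tail any table's log to follow progress):

sh run_month.sh 202311
tail -f ../logs/dwd_hma_one_id_df.log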