在开发中,经常会遇到重刷以前脚本或提取以前数据,由于数据量极大,集群资源不够,只能按小时跑数据,怎样编写脚本输入开始日期、开始小时、结束日期、结束小时自动跑数呢?
提供以下脚本供参考:
#!/bin/sh
#*****************************************************************************************
#*** 程序功能: 按天和小时循环导入分区数据
#*** 输入参数: -s|--start-day <START_DAY> :需要导入的开始日期,格式YYYYMMDD
#*** -e|--end-day <END_DAY> :需要导入的结束日期,格式YYYYMMDD
#*** --start-hour <START_HOUR> :开始日期需要开始的小时,格式HH,可选参数,默认00
#*** --end-hour <END_HOUR> :结束日期需要结束的小时,格式HH,可选参数,默认23
#*** -f|--file <SOL_FILE> :SQL脚本绝对路径名称
#*** 编 写 人: fuyun
#*** 编写日期: 2020-01-22
#*** 修 改 人:
#*****************************************************************************************
umask 002
# Usage text shown whenever the arguments are missing or malformed.
show_usage="[Usage] `date '+%F %T'` `basename $0` -s=YYYYMMDD -e=YYYYMMDD -f=filePath \n \
--start-day=YYYYMMDD --end-day=YYYYMMDD [--start-hour=HH] [--end-hour=HH] --file=filePath"
# Normalize the command line with getopt(1): short options -s/-e/-f plus GNU
# long options. start-hour/end-hour take OPTIONAL arguments ("::" syntax).
GETOPT_ARGS=`getopt -o s:e:f: -l start-day:,end-day:,start-hour::,end-hour::,file: -- "$@"`
# Abort immediately when getopt rejects the options instead of parsing garbage.
if [ $? -ne 0 ]; then
    echo -e "${show_usage} \n"
    exit 1
fi
# Re-assign the normalized arguments to the positional parameters ($1, $2, ...).
eval set -- "$GETOPT_ARGS"
# Walk the normalized argument list; each branch shifts past what it consumed.
while [ -n "$1" ]
do
case "$1" in
    -s|--start-day) START_DAY=$2; shift 2;;  # shift 2: skip flag and its value
    -e|--end-day) END_DAY=$2; shift 2;;
    --start-hour)
        # Optional argument: an empty value falls back to 00.
        case "$2" in
            "") START_HOUR=00; shift 2;;
            *) START_HOUR=$2; shift 2;;
        esac;;
    --end-hour)
        # Optional argument: an empty value falls back to 23.
        case "$2" in
            "") END_HOUR=23; shift 2;;
            *) END_HOUR=$2; shift 2;;
        esac;;
    -f|--file) SOL_FILE=$2; shift 2;;
    --) shift; break ;;                      # end-of-options marker from getopt
    *) echo -e "${show_usage} \n"; exit 1 ;; # unreachable after getopt, but be safe
esac
done
# Optional parameters: default the start hour to 00 and the end hour to 23 when
# the flags were not supplied at all. (The original wrote
# `start-hour=${start-hour:00}`, which is both an illegal hyphenated variable
# name and the wrong expansion operator, so the defaults never applied and
# omitting the hour flags always tripped the required-parameter check below.)
START_HOUR=${START_HOUR:-00}
END_HOUR=${END_HOUR:-23}
# Required parameters: both days and the SQL file must be present.
# (Hours are guaranteed non-empty by the defaults above.)
if [ -z "${START_DAY}" ] || [ -z "${END_DAY}" ] || [ -z "${SOL_FILE}" ]; then
    echo -e "${show_usage} \n"
    exit 1
fi
# (Hour-range and day-order validation happens just before executeHive runs,
# at the bottom of the script.)
# FLAG_DAY remembers the original start day so executeHive can tell the first
# day of the range apart from the rest.
FLAG_DAY=${START_DAY}
# Derive the log-file stem from the SQL file name: drop the directory part,
# then drop everything from the first dot onward.
LOG_FILE=${SOL_FILE##*/}
LOG_FILE=${LOG_FILE%%.*}
LOG_PATH=/home/fuyun/logs/${LOG_FILE}
printf "[INFO] $(date '+%F %T') execute start day and hour is ${START_DAY} ${START_HOUR}\n"
printf "[INFO] $(date '+%F %T') execute end day and hour is ${END_DAY} ${END_HOUR}\n"
printf "[INFO] $(date '+%F %T') execute SQL file is ${SOL_FILE}\n"
printf "[INFO] $(date '+%F %T') looping execute.......\n"
#######################################
# Loop over every (day, hour) partition between START_DAY/START_HOUR and
# END_DAY/END_HOUR, running the SQL script once per partition via hive.
# Globals (read): START_DAY, END_DAY, START_HOUR, END_HOUR, FLAG_DAY,
#                 SOL_FILE, LOG_FILE
# Exits non-zero as soon as one hive invocation fails.
#######################################
executeHive() {
    # Day cursor, starting at the requested start day. (The original advanced
    # a shadowing local START_DAY and then subtracted a day back off it.)
    local STAT_DAY=${FLAG_DAY}
    while :
    do
        # Hour window for this day: full day by default, narrowed on the first
        # and/or last day of the range. Two independent ifs so that a
        # single-day range honors BOTH --start-hour and --end-hour (the
        # original if/elif ignored --end-hour when start day == end day).
        local STAT_HOUR=00
        local FLAG_HOUR=23
        if [ "${STAT_DAY}" -eq "${FLAG_DAY}" ]; then
            STAT_HOUR=${START_HOUR}
        fi
        if [ "${STAT_DAY}" -eq "${END_DAY}" ]; then
            FLAG_HOUR=${END_HOUR}
        fi
        # Logs older than seven days are cleaned up after each day completes.
        # The original never set SEVEN_DAY_BEFOR, so the cleanup glob expanded
        # to ${LOG_FILE}_*.log and deleted EVERY log of this script.
        local SEVEN_DAY_BEFOR
        SEVEN_DAY_BEFOR=$(date -d "${STAT_DAY} -7 day" +%Y%m%d)
        # Deliberately unquoted where used below: the trailing * must glob.
        local SEVEN_DAY_BEFOR_LOG_PATH=/home/fuyun/logs/${LOG_FILE}_${SEVEN_DAY_BEFOR}*.log
        while :
        do
            local LOG_PATH=/home/fuyun/logs/${LOG_FILE}_${STAT_DAY}_${STAT_HOUR}.log
            printf "[INFO] $(date '+%F %T') log file name is ${LOG_PATH}\n"
            printf "[INFO] $(date '+%F %T') current execute partition is dt=${STAT_DAY} hour=${STAT_HOUR}\n"
            local startTime
            startTime=$(date '+%F %T')
            local startSeconds
            startSeconds=$(date --date="${startTime}" +%s)
            printf "[INFO] $(date '+%F %T') hivesql execute start time is ${startTime}\n"
            # Run the SQL script for this (day, hour) partition; stdout goes to
            # a temp log, stderr to the permanent log.
            hive --hiveconf DT="${STAT_DAY}" --hiveconf HOUR="${STAT_HOUR}" -f "${SOL_FILE}" >> "${LOG_PATH}.tmp" 2>> "${LOG_PATH}"
            exitCode=$?
            if [ ${exitCode} -ne 0 ]; then
                printf "[ERROR] $(date '+%F %T') hivesql execute ${SOL_FILE} is failed!!!\n"
                exit ${exitCode}
            else
                local endTime
                endTime=$(date '+%F %T')
                local endSeconds
                endSeconds=$(date --date="${endTime}" +%s)
                printf "[INFO] $(date '+%F %T') hivesql execute end time is ${endTime}\n"
                printf "[INFO] $(date '+%F %T') hivesql execute time is $(( (endSeconds - startSeconds) / 60 )) minutes\n"
                printf "[INFO] $(date '+%F %T') hivesql ${STAT_DAY} data execute success!!!\n"
            fi
            # Hours below 10 carry a leading zero, which shell arithmetic would
            # read as octal — force base 10 before incrementing.
            STAT_HOUR=$((10#$STAT_HOUR + 1))
            # Re-pad to two digits so the partition value keeps the HH format.
            if [ "${STAT_HOUR}" -lt 10 ]; then
                STAT_HOUR=0${STAT_HOUR}
            fi
            printf "[INFO] $(date '+%F %T') delete temp log ${LOG_PATH}.tmp\n"
            rm "${LOG_PATH}.tmp"
            # Past the last hour configured for this day: leave the hour loop.
            if [ "${STAT_HOUR}" -gt "${FLAG_HOUR}" ]; then
                break
            fi
        done
        # Remove week-old logs; `find` tells us whether any exist.
        find ${SEVEN_DAY_BEFOR_LOG_PATH} 2>/home/fuyun/logs/err.log.tmp
        if [ $? -ne 0 ]; then
            printf "[WARN] $(date '+%F %T') ${SEVEN_DAY_BEFOR_LOG_PATH}: No such file or directory\n"
            rm /home/fuyun/logs/err.log.tmp
        else
            printf "[INFO] $(date '+%F %T') delete log ${SEVEN_DAY_BEFOR_LOG_PATH}\n"
            rm ${SEVEN_DAY_BEFOR_LOG_PATH}
        fi
        # The whole range is finished once the end day has been processed.
        if [ "${STAT_DAY}" -eq "${END_DAY}" ]; then
            break
        fi
        STAT_DAY=$(date -d "${STAT_DAY} 1 day" +%Y%m%d)
    done
}
# Validate the range, then run: day order first, then hour bounds (00-23).
if [ "${START_DAY}" -gt "${END_DAY}" ]; then
    printf "[ERROR] $(date '+%F %T') --start-day:${START_DAY} greater than --end-day:${END_DAY}\n"
    exit 1
elif [ "${START_HOUR}" -ge 24 ] || [ "${START_HOUR}" -lt 0 ] || [ "${END_HOUR}" -ge 24 ] || [ "${END_HOUR}" -lt 0 ]; then
    # Report the offending HOUR values (the original mistakenly printed
    # ${START_DAY}/${END_DAY} in this message).
    printf "[ERROR] $(date '+%F %T') --start-hour:${START_HOUR} or --end-hour:${END_HOUR} is not 00-23 \n"
    exit 1
else
    executeHive
    printf "[INFO] $(date '+%F %T') hivesql ${START_DAY}_${START_HOUR} to ${END_DAY}_${END_HOUR} data execute success!!!\n"
fi
从2020年1月22日的21点执行到2020年1月23日的22点,执行以下命令
sh excuteHourHiveSQLTest.sh --start-day 20200122 --end-day 20200123 --start-hour=21 --end-hour=22 --file SQL/temp_hour_test.sql
从2020年1月22日的00点执行到2020年1月23日的23点,执行以下命令
sh excuteHourHiveSQLTest.sh --start-day 20200122 --end-day 20200123 --file SQL/temp_hour_test.sql
或者以下命令
sh excuteHourHiveSQLTest.sh -s 20200122 -e 20200123 -f SQL/temp_hour_test.sql