-- Engine and execution
set hive.execution.engine=spark;                   -- run Hive on Spark
set hive.vectorized.execution.enabled=true;        -- vectorize map-side operators
set hive.vectorized.execution.reduce.enabled=true; -- vectorize reduce-side operators
set hive.exec.parallel=true;                       -- run independent stages concurrently
set hive.limit.optimize.enable=true;               -- take shortcuts for LIMIT queries

-- Joins
set hive.auto.convert.join=true;                   -- convert eligible joins to map joins
set hive.optimize.skewjoin=true;                   -- handle skewed join keys in a separate job

-- Output compression (mapred.* are the deprecated aliases of the mapreduce.* names)
set hive.exec.compress.output=true;
set mapred.output.compress=true;
set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
set io.compression.codecs=org.apache.hadoop.io.compress.GzipCodec;

-- Reducers and memory; the original set mapred.reduce.tasks twice (300, then
-- 200); the later assignment wins, so keep a single value:
set mapred.reduce.tasks=200;
set mapreduce.reduce.shuffle.memory.limit.percent=0.10;  -- cap one shuffle fetch at 10% of the shuffle buffer
-- The two yarn.nodemanager.* checks are NodeManager daemon settings; they
-- normally take effect only in yarn-site.xml, not per Hive session:
set yarn.nodemanager.pmem-check-enabled=false;
set yarn.nodemanager.vmem-check-enabled=false;
set mapreduce.map.memory.mb=3072;                  -- container memory per map task
set mapreduce.reduce.memory.mb=3072;               -- container memory per reduce task
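-- Inside a session, issuing `set <property>;` with no value prints the
-- property's current value, which is handy for verifying the settings above:
set hive.execution.engine;        -- prints hive.execution.engine=spark
set mapred.reduce.tasks;          -- prints mapred.reduce.tasks=200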
-- Filter to yesterday's partition: format today as yyyy-MM-dd, subtract one
-- day with date_sub, then re-format the result as yyyyMMdd to match pt:
WHERE pt = from_unixtime(unix_timestamp(date_sub(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),1),'yyyy-MM-dd'),'yyyyMMdd')
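-- A minimal sketch of the fragment in context; t_news and its partition
-- column pt come from the shell examples below, count(*) is a placeholder:
SELECT count(*)
FROM t_news
WHERE pt = from_unixtime(unix_timestamp(date_sub(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),1),'yyyy-MM-dd'),'yyyyMMdd');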
# Running Hive from the shell
# hive_data.sh
table_name='t_news'
tt=$(date -d '-3 days' +%Y%m%d)                  # e.g. '20180422'
# method 1: pass the query inline with -e ('hive' below may be a site-local
# wrapper such as gohive):
hive -e "select * from ${table_name} limit 5;"
# method 2: run a query file with -f and pass variables via --hiveconf:
hive --hiveconf table_name='t_news' -f news.sql
# news.sql: select * from ${hiveconf:table_name} limit 5;
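# A fuller sketch of hive_data.sh combining the pieces above: pass the
# computed date into the query file instead of hard-coding it. The pt filter
# and the second --hiveconf variable are illustrative assumptions.
table_name='t_news'
pt=$(date -d '-1 day' +%Y%m%d)
hive --hiveconf table_name="${table_name}" --hiveconf pt="${pt}" -f news.sql
# news.sql would then read:
#   select * from ${hiveconf:table_name} where pt='${hiveconf:pt}' limit 5;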