#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Dynamically attach (mount) Hive partitions script.
#
# Fixes vs. the pasted original: restored the shebang (was "#/usr/bin/env"),
# restored the PEP 263 coding cookie (was "-- coding: utf-8 --"), and replaced
# the curly "smart quotes" around the os.environ strings, which are a
# SyntaxError in Python.

import os
import string
import datetime

from pyspark.sql.types import *
from pyspark.sql import SparkSession

# Point PySpark executors at the cluster's Python 2.7 interpreter.
os.environ['PYSPARK_PYTHON'] = '/appcom/service/python27/bin/python2.7'

def main():
    """Attach one Hive partition per day to data_mining.f_ph_user_fin_liq.

    Iterates every date from 2016-10-11 through 2017-06-19 (inclusive) and
    issues ALTER TABLE ... ADD IF NOT EXISTS PARTITION so the partition's
    HDFS directory is registered with the metastore. IF NOT EXISTS makes
    re-runs idempotent (the original failed whenever a partition already
    existed). Also fixes the "atler" typo and the shell-style '$d'
    placeholder, which Python never expanded.
    """
    spark = SparkSession.builder \
        .master("yarn") \
        .appName("data_management") \
        .enableHiveSupport() \
        .getOrCreate()

    begin = datetime.date(2016, 10, 11)
    end = datetime.date(2017, 6, 19)
    delta = datetime.timedelta(days=1)

    # Session-level Hive/YARN settings, applied once up front.
    for setting in (
        "set mapred.job.queue.name=queue_ndf_dev",
        "set yarn.scheduler.minimum-allocation-mb=2048",
        "set hive.exec.dynamic.partition.mode=nonstrict",
        "set yarn.nodemanager.pmem-check-enabled = false",
        "set yarn.nodemanager.vmem-check-enabled = false",
        "SET mapred.reduce.tasks=1",
    ):
        spark.sql(setting)

    d = begin
    while d <= end:
        # Interpolate the actual date into both the partition spec and the
        # LOCATION path; the original also had a stray quote inside the path.
        sql = (
            "alter table data_mining.f_ph_user_fin_liq "
            "add if not exists partition(part_dt='{0}') "
            "location '/user/hive/warehouse/data_mining.db/"
            "f_ph_user_fin_liq/part_dt={0}'".format(d)
        )
        spark.sql(sql)
        print(sql)
        d = d + delta

# Script entry point (original lost the dunder underscores and used smart
# quotes: `if name == 'main'` can never be true and would NameError anyway).
if __name__ == '__main__':
    main()

ps: 拼接 SQL 时 str 不用 + 连接会报语法错误;原 sql 语句还存在一个问题:如果分区已存在,ADD PARTITION 会一直报错。所以最好先
ALTER TABLE xxx DROP IF EXISTS PARTITION (xx='xxx')
或者直接使用 ALTER TABLE xxx ADD IF NOT EXISTS PARTITION (part_dt='xxx') LOCATION '目录',重复执行也不会报错。

spark-submit --master yarn --deploy-mode client --queue datamining --executor-memory 4g --num-executors 2 one.py

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值