#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import os
import string
import datetime
from pyspark.sql.types import *
from pyspark.sql import SparkSession

# Point PySpark workers at the cluster's Python 2.7 interpreter so the
# executors run the same Python version as the driver.
os.environ['PYSPARK_PYTHON'] = '/appcom/service/python27/bin/python2.7'
def main():
    """Register one Hive partition per day on data_mining.f_ph_user_fin_liq.

    Walks every date from 2016-10-11 through 2017-06-19 (inclusive) and
    issues an ``ALTER TABLE ... ADD IF NOT EXISTS PARTITION`` pointing at
    the matching HDFS directory. ``IF NOT EXISTS`` makes reruns idempotent:
    already-registered partitions are skipped instead of raising.
    """
    spark = SparkSession.builder \
        .master("yarn") \
        .appName("data_management") \
        .enableHiveSupport() \
        .getOrCreate()

    # Session-level Hive/YARN settings; applied once, before the loop.
    for setting in (
        "set mapred.job.queue.name=queue_ndf_dev",
        "set yarn.scheduler.minimum-allocation-mb=2048",
        "set hive.exec.dynamic.partition.mode=nonstrict",
        "set yarn.nodemanager.pmem-check-enabled = false",
        "set yarn.nodemanager.vmem-check-enabled = false",
        "SET mapred.reduce.tasks=1",
    ):
        spark.sql(setting)

    begin = datetime.date(2016, 10, 11)
    end = datetime.date(2017, 6, 19)
    delta = datetime.timedelta(days=1)

    d = begin
    while d <= end:
        # The original statement misspelled ALTER as "atler" and embedded a
        # literal '$d' (Python does not interpolate $-variables); build the
        # DDL with the actual date instead. str(date) yields YYYY-MM-DD,
        # which matches the part_dt directory layout.
        ddl = (
            "alter table data_mining.f_ph_user_fin_liq "
            "add if not exists partition (part_dt='%s') "
            "location '/user/hive/warehouse/data_mining.db/"
            "f_ph_user_fin_liq/part_dt=%s'" % (d, d)
        )
        spark.sql(ddl)
        print(ddl)
        d = d + delta
# Standard script entry guard: run main() only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
ps: 拼接日期时如果不用 str() 和 + 连接会报语法错误。此外原 sql 语句本身有问题：如果分区已存在会一直报错，所以最好先
ALTER TABLE xxx DROP IF EXISTS PARTITION (xx='xxx')
同时 alter table xxx add if not exists partition (part_dt=xx) location '目录'
spark-submit --master yarn --deploy-mode client --queue datamining --executor-memory 4g --num-executors 2 one.py