1. Overwrite a static partition with INSERT OVERWRITE in Spark SQL:
sql = f"""
insert overwrite table app.app_od_info partition(dt='{startT}')
select
so_no,
so_num
from table1
"""
spark.sql(sql)
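All four approaches assume a SparkSession with Hive support and a startT date string already in scope (neither is shown in the original); a minimal sketch of that setup, with illustrative names and values:

from pyspark.sql import SparkSession

# Hive support is required for insert overwrite / insertInto / load data on Hive tables
spark = SparkSession.builder \
    .appName("od_info_load") \
    .enableHiveSupport() \
    .getOrCreate()

startT = "2024-01-01"  # illustrative target partition date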
2. Select the partition column in the query and write with insertInto, coalescing to 5 output files:
sql = f"""
select
so_no,
so_num,
'{startT}' as dt
from table1
"""
# coalesce(5) narrows to 5 output files without a full shuffle;
# insertInto matches columns by position, so dt must come last, as in the table schema
df = spark.sql(sql).coalesce(5)
# mode("append") contradicted overwrite=True; insertInto's overwrite flag is what takes effect, so drop the mode call
df.write.insertInto('app.app_od_info', overwrite=True)
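With overwrite=True on a partitioned table, how much gets replaced depends on the table type and session configuration. A hedged sketch of the settings commonly paired with this pattern (config names are Spark's and Hive's documented ones; whether each applies depends on whether app.app_od_info is a datasource or Hive serde table):

# Spark 2.3+: limit overwrite=True to the partitions actually written (default "static" wipes all partitions)
spark.conf.set("spark.sql.sources.partitionOverwriteMode", "dynamic")
# Hive tables additionally need dynamic-partition inserts enabled
spark.conf.set("hive.exec.dynamic.partition", "true")
spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")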
3. Same query, but repartitioning instead of coalescing:
sql = f"""
select
so_no,
so_num,
'{startT}' as dt
from table1
"""
# repartition(5) shuffles into 5 evenly sized partitions; the trailing .toDF() was a no-op and is dropped
df = spark.sql(sql).repartition(5)
df.write.insertInto('app.app_od_info', overwrite=True)
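Methods 2 and 3 differ only in how the 5 output files are produced: coalesce(5) merges existing partitions without a shuffle, which is cheaper but can leave skewed file sizes, while repartition(5) performs a full shuffle into evenly sized partitions at higher cost. Prefer coalesce when you are only reducing file count and skew is acceptable.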
4. Load pre-written files directly into a partition with LOAD DATA:
def import_hive(spark, input_path, table_name, partitionsKey, overwrite=True):
    # LOAD DATA moves (not copies) the files at input_path into the partition's directory
    if overwrite:
        load_sql = f"load data inpath '{input_path}' overwrite into table {table_name} partition(dt='{partitionsKey}')"
    else:
        load_sql = f"load data inpath '{input_path}' into table {table_name} partition(dt='{partitionsKey}')"
    spark.sql(load_sql)
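A hedged usage example; the HDFS path and date are illustrative, and the files at input_path must already match the table's storage format, since LOAD DATA only relocates them:

# illustrative call; "/tmp/od_info_export" and the date are assumptions
import_hive(spark, "/tmp/od_info_export", "app.app_od_info", "2024-01-01", overwrite=True)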