spark集群升级之后，以orc格式写hdfs文件再导入hive表时，出现部分字段读出为null的bug。版本变化：hadoop 2.7.2 → 3.1.1；hive 1.2.1 → 3.1.0；spark 2.1.0 → 2.3.2
-- External partitioned ORC table that the Spark job appends into.
-- NOTE(review): the Scala code below runs "use biads" while the DDL uses
-- database "birds" -- one of the two is a typo; confirm the real database name.
-- NOTE(review): "row format delimited / fields terminated by" is ignored for
-- ORC-stored tables (ORC is a binary columnar format); kept here because the
-- original had it, but it can be dropped.
create external table if not exists birds.table_name
(
  id         string comment '',
  isGreat    string comment '',    -- Hive stores column names lowercased ("isgreat")
  updateTime string comment ''     -- stored as "updatetime" in the metastore
)
comment ''
partitioned by (pt_d string comment '天分区')  -- daily partition
row format delimited
fields terminated by '\001'
lines terminated by '\n'
stored as orc
location 'hdfs://hacluster/*/table_name'
tblproperties ('orc.compress'='ZLIB');  -- canonical values are NONE/ZLIB/SNAPPY (uppercase)
// Build a DataFrame from the row RDD using the explicit schema, coalesce to a
// single output file, and append it as ORC files under the partition directory.
// NOTE(review): original snippet had the invalid expression "rdd[row]" -- assuming
// a value of type RDD[Row] here (named rowRdd); confirm the real variable name.
// Fixed typos from the original: "reparation" -> repartition, ''append'' -> "append",
// "format.(" -> "format(".
val hiveoutDf = ss.createDataFrame(rowRdd, struct).repartition(1)
hiveoutDf.write.mode("append").format("orc").save(datapathpartition)

// Register the freshly written directory as a partition so Hive can query it.
// NOTE(review): database is "biads" here but the DDL above says "birds" -- confirm.
val sqlcode = s"alter table table_name add if not exists partition (pt_d='$date')"
ss.sql("use biads")
ss.sql(sqlcode)
// Schema for the ORC output. Field names MUST be lowercase: Hive stores column
// names lowercased in the metastore, and after the upgrade (Spark 2.3's native
// ORC reader / Hive 3) field-name matching is case-sensitive, so camelCase
// fields ("isGreat", "updateTime") fail to match and are read back as null --
// this is exactly the bug described at the top of this note.
// Also fixed from the original: "StructFiled" -> StructField (would not compile)
// and the missing comma between the second and third fields.
val struct = StructType(Array(
  StructField("id", StringType),
  StructField("isgreat", StringType),
  StructField("updatetime", StringType)
))
原因分析：Hive 在元数据中统一以小写保存列名，升级后（Spark 2.3 原生 ORC reader / Hive 3.1）按字段名匹配 ORC schema 时变为大小写敏感，因此用驼峰命名写出的字段（如 isGreat、updateTime）匹配不上，读出来就是 null。解决办法：把 struct 里的字段名全部改成小写即可。
关注微信公众号【飞哥大数据】,回复666 获取2022年100+公司面试真题,以及spark与flink面试题汇总