环境:Paimon 分区可以设置为自动失效(过期),官网也提供了手动清理分区数据的方案。Paimon 0.5 版本只支持 Flink;本测试案例使用 Flink 1.17.1、Paimon 0.5、Hive 3.1.2。
案例:通过批模式下的 INSERT OVERWRITE(关闭动态分区覆盖)手动清空指定分区
启动yarn-session
flink client操作
[root@ks2p-hadoop06 flink-1.17.1]# ./bin/yarn-session.sh
# 连接yarn session
[root@ks2p-hadoop06 bin]# ./sql-client.sh
Flink SQL> CREATE CATALOG paimon_hive WITH (
> 'type' = 'paimon',
> 'metastore' = 'hive',
> 'uri' = 'thrift://xx.xx.xx.xx:9083',
> 'warehouse' = 'hdfs:///data/hive/warehouse/paimon',
> 'default-database'='test'
> );
# 改为批模式执行(INSERT OVERWRITE 需要在批模式下运行)
Flink SQL> SET 'execution.runtime-mode' = 'batch';
Flink SQL> INSERT OVERWRITE test.student2 /*+ OPTIONS('dynamic-partition-overwrite'='false') */ PARTITION (dt = '2023-10-09') SELECT id,name,age,birthday FROM test.student2 WHERE false;