from pyflink.table import EnvironmentSettings, TableEnvironment
from pyflink.table import EnvironmentSettings, TableEnvironment, DataTypes
from pyflink.table.udf import TableFunction,udtf
from pyflink.table.expressions import call
from pyflink.table.udf import udf
# 1. Create the TableEnvironment in streaming mode.
# NOTE(review): use_blink_planner() is redundant on Flink >= 1.11 (blink is the
# default planner) and was removed entirely in Flink 1.14 — confirm the target
# Flink version before upgrading.
env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
table_env = TableEnvironment.create(env_settings)
# 2. Create the source table: JSON records with fields (i INT, j INT, m VARCHAR)
# consumed from the Kafka topic 'flink_udftest1', starting at the latest offset.
table_env.execute_sql("""
CREATE TABLE datagen (
i INT,
j INT,
m VARCHAR
) WITH (
'connector' = 'kafka',
'topic' = 'flink_udftest1',
'properties.bootstrap.servers' = '10.1:9092',
'properties.group.id' = 'test_PrintUdf',
'scan.startup.mode' = 'latest-offset',
'format' = 'json'
)
""")
@udf(input_types=[DataTypes.INT(), DataTypes.INT()], result_type=DataTypes.INT())
def add(i, j):
    """Scalar UDF: return the sum of two INT column values."""
    total = i + j
    return total

# Register the UDF so SQL statements can invoke it as add(...).
table_env.register_function("add", add)
# 3. Create the sink table: rows (i INT, numsum INT) are written to stdout
# via the 'print' connector.
table_env.execute_sql("""
CREATE TABLE print (
i INT,
numsum INT
) WITH (
'connector' = 'print'
)
""")
# 4. Query the source table and run the computation.
# Table API handle for the source (unused below; kept to illustrate the
# Table API route alongside the SQL route):
source_table = table_env.from_path("datagen")
# Or create a table from a SQL query:
# source_table = table_env.sql_query("SELECT * FROM datagen")

# Keep the original job name; execute_sql() takes no job-name argument,
# so it is supplied through the 'pipeline.name' configuration option.
table_env.get_config().get_configuration().set_string("pipeline.name", "job_sum")

# sql_update() + TableEnvironment.execute() are deprecated (removed in Flink
# 1.14). execute_sql() submits the INSERT job directly, consistent with the
# DDL statements above; wait() blocks until the streaming job terminates.
table_env.execute_sql("insert into print select i, add(i,j) from datagen").wait()
# Python implementation of a PyFlink scalar user-defined function (UDF).
# (Source article metadata: latest recommended article published 2024-07-25 22:41:30.)