// Flink program entry point (Flink 程序入口)

import com.flinkRisk.config.FlinkConfig;
import com.flinkRisk.constant.OutSideTag;
import com.flinkRisk.model.*;
import com.flinkRisk.process.BaseBroadcastProcessFunction;
import com.flinkRisk.rule.BaseRule;
import com.flinkRisk.sink.ClickhouseRiskEventSink;
import com.flinkRisk.sink.rocketmq.AlarmEventRocketMqSink;
import com.flinkRisk.source.LocalPosSource;
import com.flinkRisk.source.MysqlRuleSource;
import com.flinkRisk.source.RocketMQPosInfoSource;
import com.flinkRisk.source.rocketmq.RocketMQConfig;
import com.flinkRisk.source.rocketmq.RocketMQSink;
import com.flinkRisk.source.rocketmq.RocketMQSource;
import com.flinkRisk.source.rocketmq.common.selector.DefaultTopicSelector;
import com.flinkRisk.source.rocketmq.common.serialization.AlarmEventSerializationSchema;
import com.flinkRisk.source.rocketmq.common.serialization.VehiclePosInfoDeserializationSchema;

import lombok.extern.slf4j.Slf4j;

import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.io.IOException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Properties;
@Slf4j
public class Main {

    /**
     * 本地调试模式  部署线上时要改为false
     */
    public static boolean localMode = false;

    /*****************************************本地调试配置开始*****************************************/

    /**
     * 本地需要调试的业务,多个以;号隔开
     */
    public final static String biz = "FatigueDrivingAlarmHandler";

    /**
     * 本地调试时状态存储位置 localMode为true时使用
     */
    private static String stateBackend = "file:///E:/flinkRockDb";

    /**
     * 本地的轨迹文件路径  localMode为true时使用
     */
     private static String localPosDataPath = "E:\\flinkTestData\\1281.txt ";

    /**
     * 轨迹文件开始时间 localMode为true时使用
     */
    private static String startTime = "2022-03-20 05:30:00";
    /**
     * 轨迹文件结束时间 localMode为true时使用
     */
    private static String endTime = "2022-03-20 10:59:59";
    /**
     * 车辆id localMode为true时使用
     */
    private static Integer vehicleId = 9044748;
    /**
     * 车牌 localMode为true时使用
     */
    private static String plate = "05#-13920-D53";
    /*****************************************本地调试配置结束*****************************************/

    /**
     * 线上模式,需要将parameterTool分发给其他算子
     */
    private static ParameterTool parameterTool;
	
	public static void main(String[] args) throws Exception {

        log.info("启动应用...");
        //1、解析命令行参数
        init(args);
        //初始化clickhouse连接

        //checkpoint配置
        String stateBackendDirectory = "";
        //本地调试 状态存储路径
//        if (localMode) {
//            stateBackendDirectory = stateBackend;
//        }
        //线上环境 状态存储路径
        if (!localMode) {
            stateBackendDirectory = parameterTool.getRequired("state.backend.directory");
        }
         //rocketmq车辆定位数据流配置
        Properties consumerProps = new Properties();
        consumerProps.setProperty(RocketMQConfig.NAME_SERVER_ADDR, parameterTool.getRequired("rocketmq.nameserver.address"));
        consumerProps.setProperty(RocketMQConfig.CONSUMER_GROUP, parameterTool.getRequired("rocketmq.consumer.group"));
        consumerProps.setProperty(RocketMQConfig.CONSUMER_TOPIC, parameterTool.getRequired("rocketmq.consumer.topic"));


        //配置流配置


        //2、配置运行环境
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(parameterTool);
//        线上设置StateBackend
        if(!localMode){
            env.setStateBackend(new FsStateBackend(stateBackendDirectory, true));
//        设置Checkpoint
            CheckpointConfig checkpointConfig = env.getCheckpointConfig();
            env.setBufferTimeout(1000);
            //两分钟超时  Connection reset by peer
            checkpointConfig.setCheckpointTimeout(120000);
            //两个检查点之间至少间隔5秒
            checkpointConfig.setMinPauseBetweenCheckpoints(5000);
            //启用非对齐检查点
            checkpointConfig.enableUnalignedCheckpoints(true);
            //设置多少秒持久化一次快照 单位ms
            checkpointConfig.setCheckpointInterval(parameterTool.getInt("checkpoint.interval"));
            checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        }
        //3、rocketMq车辆定位事件流

        DataStreamSource<VehiclePosInfo> rocketMqStream = null;
        //本地调试
        if (localMode) {
            LocalPosSource localPosSource = new LocalPosSource(localPosDataPath, startTime, endTime, vehicleId, plate);
            rocketMqStream = env.addSource(localPosSource);
        }
          //线上
        if (!localMode) {
            int rocketMqParallelism =  parameterTool.getInt("rocketMqParallelism");
            RocketMQSource<VehiclePosInfo> rocketMqSource = new RocketMQPosInfoSource(new VehiclePosInfoDeserializationSchema(), consumerProps);
            rocketMqStream = env.addSource(rocketMqSource).setParallelism(rocketMqParallelism);
        }
        rocketMqStream.name("rocketMq定位数据流");

        //4、Mysql配置流
        //自定义Mysql Source,周期性地从Mysql中获取配置,并广播出去
        MysqlRuleSource mysqlRuleSource = new MysqlRuleSource();
        DataStreamSource<BaseRule> configStream = env.addSource(mysqlRuleSource);
        configStream.name("mysql规则流");
        /*
          (1) 先建立MapStateDescriptor
          MapStateDescriptor定义了状态的名称、Key和Value的类型。
          这里,MapStateDescriptor中,key是Void类型,value是Map<String, Tuple2<String,Int>>类型。
         */
        final MapStateDescriptor<Long, BaseRule> configDescriptor = new MapStateDescriptor<>("rule", Types.LONG, Types.GENERIC(BaseRule.class));

        /*
          (2) 将配置流广播,形成BroadcastStream
         */
        BroadcastStream<BaseRule> broadcastConfigStream = configStream.broadcast(configDescriptor);
         //5、事件流和广播的配置流连接,形成BroadcastConnectedStream
        BroadcastConnectedStream<VehiclePosInfo, BaseRule> connectedStream = rocketMqStream.keyBy(VehiclePosInfo::getVehicleId).connect(broadcastConfigStream);

        //6、对BroadcastConnectedStream应用process方法,根据配置(规则)处理事件
        int baseBroadcastProcessParallelism = parameterTool.getInt("baseBroadcastProcessParallelism");
        SingleOutputStreamOperator<AlarmEvent> resultStream = connectedStream.process(new BaseBroadcastProcessFunction()).setParallelism(baseBroadcastProcessParallelism);
        //7、输出结果
        //线上环境
        f (!localMode) {
            Properties producerProps = new Properties();
            producerProps.setProperty(RocketMQConfig.NAME_SERVER_ADDR, parameterTool.getRequired("rocketmq.nameserver.address"));
            String topic = parameterTool.getRequired("rocketmq.producer.topic");
            String tag = parameterTool.get("risk-event");
            //没有延迟
            int msgDelayLevel = RocketMQConfig.MSG_DELAY_LEVEL00;
            producerProps.setProperty(RocketMQConfig.MSG_DELAY_LEVEL, String.valueOf(msgDelayLevel));
            //不进行批量发送
            boolean batchFlag = false;
            RocketMQSink<AlarmEvent> alarmEventRocketMQSink = new AlarmEventRocketMqSink(new AlarmEventSerializationSchema(),
                    new DefaultTopicSelector<>(topic,tag), producerProps).withBatchFlushOnCheckpoint(batchFlag);
            int dataToMysqlParallelism = parameterTool.getInt("dataToMysqlParallelism");
            resultStream.addSink(alarmEventRocketMQSink).setParallelism(dataToMysqlParallelism).name("报警事件流");
			}
			//侧向输入,写入clickhouse数据库
        DataStream<AlarmEvent> sideOutput = resultStream.getSideOutput(OutSideTag.RISK_EVENT);
        sideOutput.addSink(new ClickhouseRiskEventSink()).setParallelism(1).name("clickhouse输出流");

        //9、生成JobGraph,并开始执行
        env.execute("风险数据流分析(粤标)");
   }
    /**
     * 配置参数初始化
     *
     * @param args
     * @throws IOException
     */
     private static void init(String[] args) throws IOException {
		 //1、解析命令行参数
        ParameterTool fromArgs = ParameterTool.fromArgs(args);
        //本地模式
        if (localMode) {
            URL url = Thread.currentThread().getContextClassLoader().getResource("config.properties");
            String path = URLDecoder.decode(url.getPath(), "utf-8");
            parameterTool = ParameterTool.fromPropertiesFile(path);
            FlinkConfig.init(parameterTool);
        }
        /**
         * 线上模式
         */
        if (!localMode) {
            parameterTool = ParameterTool.fromPropertiesFile(fromArgs.getRequired("configFile"));
            FlinkConfig.init(parameterTool);
        }
	}
 }
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
samza转换为flink程序示例可以通过如下步骤实现: 1. 首先,需要编写一个main函数作为程序入口,并将其提交到Flink任务管理器中。 2. 使用Flink的DataStream API来定义输入流和输出流。可以使用FlinkKafkaConsumer将Kafka中的二进制数据转换为Java/Scala对象。 3. 使用Flink的转换操作来对输入流进行处理,例如过滤、映射、聚合等。这些转换操作可以以一个或多个stream作为输入,并产生一个或多个结果stream。 4. 将处理后的结果通过Flink的DataStream API写入到相应的输出源,例如Kafka、文件系统等。 下面是一个示例程序的伪代码: ```java public class SamzaToFlinkExample { public static void main(String[] args) throws Exception { // 设置执行环境 StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // 定义输入流 FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>("topic", new SimpleStringSchema(), properties); DataStream<String> inputStream = env.addSource(kafkaConsumer); // 转换操作 DataStream<String> transformedStream = inputStream.filter(str -> str.contains("keyword")); // 定义输出流 transformedStream.addSink(new FlinkKafkaProducer<>("output-topic", new SimpleStringSchema(), properties)); // 执行程序 env.execute("Samza to Flink Example"); } } ``` 以上是一个简单的示例,演示了如何将Samza程序转换为Flink程序。在实际应用中,根据具体需求,可以进行更复杂的转换操作和配置。<span class="em">1</span><span class="em">2</span><span class="em">3</span> #### 引用[.reference_title] - *1* [java编写flink任务示例](https://blog.csdn.net/feinifi/article/details/121293135)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v93^chatsearchT3_2"}}] [.reference_item style="max-width: 50%"] - *2* *3* [10.大数据技术之Flink](https://blog.csdn.net/wzb1983/article/details/125827993)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v93^chatsearchT3_2"}}] [.reference_item style="max-width: 50%"] [ .reference_list ]

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值