// 使用slot分组 或者 disableChaining(),我们可以将算子的依赖链给隔离或者分开,这样可以针对不同的需求进行不同的优化。
// (Using slot sharing groups or disableChaining(), we can isolate or split operator chains so that different optimizations can be applied per requirement.)
package application; import com.alibaba.fastjson.JSONObject; import operator.*; import org.apache.commons.lang.StringUtils; import org.apache.flink.api.common.functions.FilterFunction; import org.apache.flink.api.common.functions.MapFunction; import org.apache.flink.api.common.serialization.SimpleStringSchema; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.streaming.api.TimeCharacteristic; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010; import org.apache.flink.util.OutputTag; import org.slf4j.LoggerFactory; import java.util.Properties; /** * todo 备忘记录:使用分流方式,分为单场景快速流,分为多场景或者需要查询hbase的复合流 * * todo 1,接下来要做的;了解并行度;添加水印;添加checkpoint;代码正规化; * * todo 2, 测试slot group ,通过 slotSharingGroup * * todo 3, disableChain,startNewChain的使用 * * todo 4, name()可以取名字哦 */ public class StormToFlink_hbase_demo { private static org.slf4j.Logger logger = LoggerFactory.getLogger(StormToFlink_hbase_demo.class); public static void main(String[] args) throws Exception { // String fileUrl = "D:\\wxgz-local\\resources_yace\\"; String fileUrl = "/zywa/job/storm/resources_new/";