Flink1.11.2-1Json-sideProcess

ToHdfs

package com.flink.app.toHdfs;

import com.flink.app.parJson.P_PageLog;
import com.flink.app.parJson.P_PageLog2;
import com.flink.bean.PageLog;
import com.flink.bean.PageLog2;
import com.hdfs.HdfsUtils;
import com.kafka.kafka_api.KafkaUtils;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.util.OutputTag;

public class ToHdfs {
    public static void main(String[] args) {
        // create the execution environment and the Kafka source
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        FlinkKafkaConsumer011<String> kafkaSource = KafkaUtils.getFlinkKafkaConsumer("pageLog");
        DataStreamSource<String> dsJsonStr = env.addSource(kafkaSource);
        // split the stream into two side outputs (see MySideProcess below)
        SingleOutputStreamOperator<String> processDS = dsJsonStr.process(new MySideProcess());
        // the tag must have the same id and type as the one used inside MySideProcess
        OutputTag<String> pageLogOutputTag = new OutputTag<String>("pageLog"){};
        //pageLog sink
        processDS.getSideOutput(pageLogOutputTag)
                .map(new MapFunction<String, PageLog>() {
                    @Override
                    public PageLog map(String in) throws Exception {
                        PageLog pageLog = P_PageLog.getPageLog(in);
                        System.out.println(pageLog);
                        return pageLog;
                    }
                })
//                .print("pageLog")
                .addSink(HdfsUtils.getPageLogBucketingSink("hdfs://192.168.1.162:8020/kafkaTohdfs/pageLog"));

        //pageLog2 sink
        OutputTag<String> pageLog2OutputTag = new OutputTag<String>("pageLog2"){};
        processDS.getSideOutput(pageLog2OutputTag)
                .map(new MapFunction<String, PageLog2>() {
                    @Override
                    public PageLog2 map(String in) throws Exception {
                        PageLog2 pageLog2 = P_PageLog2.getPageLog2(in);
                        System.out.println(pageLog2);
                        return pageLog2;
                    }
                })
//                .print("pageLog2")
                .addSink(HdfsUtils.getPageLog2BucketingSink("hdfs://192.168.1.162:8020/kafkaTohdfs/pageLog2/"));
        // BucketingSink sink = HdfsUtils.getBucketingSink("hdfs://192.168.1.162:8020/kafkaTohdfs/");
        // dsStr.addSink(sink);

        try {
            env.execute();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }


}
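KafkaUtils is not shown in the original post. Below is a minimal sketch of what it might look like; the broker address and the consumer group id are assumptions, adjust them to your cluster:

package com.kafka.kafka_api;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import java.util.Properties;

public class KafkaUtils {
    public static FlinkKafkaConsumer011<String> getFlinkKafkaConsumer(String topic) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.1.162:9092");   // assumed broker address
        props.setProperty("group.id", "flink-to-hdfs");                 // assumed consumer group
        // read each Kafka record as a plain JSON string
        return new FlinkKafkaConsumer011<>(topic, new SimpleStringSchema(), props);
    }
}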

MySideProcess

package com.flink.app.toHdfs;

import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

public class MySideProcess extends ProcessFunction<String, String> {
    // OutputTags are created as anonymous subclasses so Flink can keep the String type information
    OutputTag<String> pageLog = new OutputTag<String>("pageLog"){};
    OutputTag<String> pageLog2 = new OutputTag<String>("pageLog2"){};

    @Override
    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
        // every record is duplicated into both side outputs and also emitted on the main output
        ctx.output(pageLog, value);
        ctx.output(pageLog2, value);
        out.collect(value);
    }
}
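MySideProcess above duplicates every record into both side outputs. If the two sinks are meant to receive different records, processElement could instead route on a JSON field. A hypothetical variant (the "home" value is only an example; it uses the same fastjson JSON/JSONObject classes imported in P_PageLog below):

    @Override
    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
        JSONObject json = JSON.parseObject(value);
        if ("home".equals(json.getString("page_id"))) {
            ctx.output(pageLog, value);      // e.g. home-page views
        } else {
            ctx.output(pageLog2, value);     // everything else
        }
        out.collect(value);                  // the main output still carries every record
    }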

HdfsUtils

package com.hdfs;

import com.flink.bean.PageLog;
import com.flink.bean.PageLog2;
import org.apache.flink.streaming.connectors.fs.StringWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;

public class HdfsUtils {
    public static BucketingSink<PageLog> getPageLogBucketingSink(String basePath) {
        // e.g. basePath = "hdfs://192.168.1.162:8020/kafkaTohdfs/pageLog"
        BucketingSink<PageLog> sink = new BucketingSink<>(basePath);
        // bucket by event time so the data is partitioned across days/hours
        // sink.setBucketer(new EventTimeBucketer<PageLog>("yyyy/MM/dd"));
        sink.setBucketer(new PageLogEventTimeBucketer("yyyy-MM-dd/HH"));
        sink.setWriter(new StringWriter<>());
        // sink.setBatchSize(1024 * 1024 * 256L);
        sink.setBatchRolloverInterval(1 * 60 * 1000L);   // roll the part file after this interval
        // sink.setInactiveBucketThreshold(3 * 60 * 1000L);
        // sink.setInactiveBucketCheckInterval(30 * 1000L);
        // how often to check for buckets that have become inactive
        sink.setInactiveBucketCheckInterval(1 * 60 * 1000L);
        // close a bucket once it has received no writes for this long
        sink.setInactiveBucketThreshold(1 * 60 * 1000L);

        sink.setInProgressSuffix(".in-progress");
        sink.setPendingSuffix(".pending");
        return sink;
    }
    public static BucketingSink<PageLog2> getPageLog2BucketingSink(String basePath) {
        // e.g. basePath = "hdfs://192.168.1.162:8020/kafkaTohdfs/pageLog2/"
        BucketingSink<PageLog2> sink = new BucketingSink<>(basePath);
        // bucket by event time so the data is partitioned across days/hours
        // sink.setBucketer(new EventTimeBucketer<PageLog>("yyyy/MM/dd"));
        sink.setBucketer(new PageLog2EventTimeBucketer("yyyy-MM-dd/HH"));
        sink.setWriter(new StringWriter<>());
        // sink.setBatchSize(1024 * 1024 * 256L);
        sink.setBatchRolloverInterval(1 * 60 * 1000L);   // roll the part file after this interval
        // sink.setInactiveBucketThreshold(3 * 60 * 1000L);
        // sink.setInactiveBucketCheckInterval(30 * 1000L);
        // how often to check for buckets that have become inactive
        sink.setInactiveBucketCheckInterval(1 * 60 * 1000L);
        // close a bucket once it has received no writes for this long
        sink.setInactiveBucketThreshold(1 * 60 * 1000L);

        sink.setInProgressSuffix(".in-progress");
        sink.setPendingSuffix(".pending");
        return sink;
    }
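    // The two factory methods above differ only in the element type and the bucketer.
    // A possible generalization they could both delegate to (a sketch, not in the original post;
    // requires: import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer;):
    public static <T> BucketingSink<T> getBucketingSink(String basePath, Bucketer<T> bucketer) {
        BucketingSink<T> sink = new BucketingSink<>(basePath);
        sink.setBucketer(bucketer);
        sink.setWriter(new StringWriter<>());
        sink.setBatchRolloverInterval(60 * 1000L);           // roll part files every minute
        sink.setInactiveBucketCheckInterval(60 * 1000L);     // check for idle buckets every minute
        sink.setInactiveBucketThreshold(60 * 1000L);         // close buckets idle for one minute
        sink.setInProgressSuffix(".in-progress");
        sink.setPendingSuffix(".pending");
        return sink;
    }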
}

PageLog2EventTimeBucketer

package com.hdfs;
import com.flink.bean.PageLog2;
import org.apache.flink.streaming.connectors.fs.Clock;
import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

public class PageLog2EventTimeBucketer implements Bucketer<PageLog2> {
    private static final String DEFAULT_FORMAT_STRING = "yyyy/MM/dd";
    private final String formatString;
    private final ZoneId zoneId;
    private transient DateTimeFormatter dateTimeFormatter;
    public PageLog2EventTimeBucketer() {
        this(DEFAULT_FORMAT_STRING);
    }
    public PageLog2EventTimeBucketer(String formatString) {
        this(formatString, ZoneId.systemDefault());
    }
    public PageLog2EventTimeBucketer(ZoneId zoneId) {
        this(DEFAULT_FORMAT_STRING, zoneId);
    }
    public PageLog2EventTimeBucketer(String formatString, ZoneId zoneId) {
        this.formatString = formatString;
        this.zoneId = zoneId;
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(this.formatString).withZone(this.zoneId);
    }
    // This readObject() is required: dateTimeFormatter is transient, so it would be null after
    // deserialization (the constructor is not invoked then), and this method re-creates it.
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(formatString).withZone(zoneId);
    }
    @Override
    public Path getBucketPath(Clock clock, Path basePath, PageLog2 element) {
        // uId carries the event time in epoch seconds, so convert to milliseconds for Instant
        String newDateTimeString = dateTimeFormatter.format(Instant.ofEpochMilli(element.getuId() * 1000));
        // System.out.println(element.getuId());
        // System.out.println(newDateTimeString);
        return new Path(basePath + "/" + newDateTimeString);
    }
}

PageLogEventTimeBucketer

package com.hdfs;
import com.flink.bean.PageLog;
import org.apache.flink.streaming.connectors.fs.Clock;
import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

public class PageLogEventTimeBucketer implements Bucketer<PageLog> {
    private static final String DEFAULT_FORMAT_STRING = "yyyy/MM/dd";
    private final String formatString;
    private final ZoneId zoneId;
    private transient DateTimeFormatter dateTimeFormatter;
    public PageLogEventTimeBucketer() {
        this(DEFAULT_FORMAT_STRING);
    }
    public PageLogEventTimeBucketer(String formatString) {
        this(formatString, ZoneId.systemDefault());
    }
    public PageLogEventTimeBucketer(ZoneId zoneId) {
        this(DEFAULT_FORMAT_STRING, zoneId);
    }
    public PageLogEventTimeBucketer(String formatString, ZoneId zoneId) {
        this.formatString = formatString;
        this.zoneId = zoneId;
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(this.formatString).withZone(this.zoneId);
    }
    // This readObject() is required: dateTimeFormatter is transient, so it would be null after
    // deserialization (the constructor is not invoked then), and this method re-creates it.
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(formatString).withZone(zoneId);
    }
    @Override
    public Path getBucketPath(Clock clock, Path basePath, PageLog element) {
        // uId carries the event time in epoch seconds, so convert to milliseconds for Instant
        String newDateTimeString = dateTimeFormatter.format(Instant.ofEpochMilli(element.getuId() * 1000));
        // System.out.println(element.getuId());
        // System.out.println(newDateTimeString);
        return new Path(basePath + "/" + newDateTimeString);
    }
}
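A quick worked example of the bucket path this produces, assuming the JVM default zone is Asia/Shanghai (UTC+8) and using the same 4-argument PageLog constructor called in P_PageLog below:

PageLog sample = new PageLog("app1", "dev1", "home", 1600000000L);   // 2020-09-13T12:26:40Z
PageLogEventTimeBucketer bucketer = new PageLogEventTimeBucketer("yyyy-MM-dd/HH");
// the Clock argument is ignored by this implementation, so null is fine here
Path bucket = bucketer.getBucketPath(null, new Path("hdfs://192.168.1.162:8020/kafkaTohdfs/pageLog"), sample);
// bucket -> hdfs://192.168.1.162:8020/kafkaTohdfs/pageLog/2020-09-13/20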

P_PageLog

package com.flink.app.parJson;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.flink.bean.PageLog;

public class P_PageLog {
    public static PageLog getPageLog(String in){
       // System.out.println(in);
        JSONObject jsonObject = JSON.parseObject(in);
        String app_id = jsonObject.getString("app_id");
        String device_id = jsonObject.getString("device_id");
        String pageId = jsonObject.getString("page_id");
        String uId = jsonObject.getString("uid");
        PageLog pageLog = new PageLog(app_id,device_id,pageId,Long.valueOf(uId));
        return pageLog;
    }
}
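The PageLog bean itself is not included in the post (nor are PageLog2 and P_PageLog2, which presumably follow the same pattern). A hypothetical sketch inferred from how it is used, i.e. the 4-argument constructor in P_PageLog and getuId() in the bucketer; the field names and toString format are assumptions:

package com.flink.bean;

public class PageLog {
    private String appId;
    private String deviceId;
    private String pageId;
    private long uId;   // also used as the event time in epoch seconds (see the bucketer)

    public PageLog(String appId, String deviceId, String pageId, long uId) {
        this.appId = appId;
        this.deviceId = deviceId;
        this.pageId = pageId;
        this.uId = uId;
    }

    public long getuId() {
        return uId;
    }

    @Override
    public String toString() {
        // StringWriter writes toString() output, so this is the line format that lands in HDFS
        return appId + "," + deviceId + "," + pageId + "," + uId;
    }
}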
