Flink_Sink

一、Sink to HBase

1、Extending RichSinkFunction

1.1 Main class

package com.nfdw;

import com.nfdw.entity.Employees;
import com.nfdw.sink.MyHBaseSinkFunction;
import com.nfdw.utils.*;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import java.util.Date;
import java.util.Properties;

public class App {

    public static void main(String[] args) throws Exception {

        // 1. Obtain the execution environment
        StreamExecutionEnvironment env = GetStreamExecutionEnvironment.getEnv();
        // Kafka consumer properties
        Properties prop = new Properties();
        prop.setProperty("bootstrap.servers","cdh101:9092");
        prop.setProperty("group.id","cloudera_mirrormaker");
        prop.put("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        FlinkKafkaConsumer011<String> myConsumer = new FlinkKafkaConsumer011<>("luchangyin", new SimpleStringSchema(), prop);
        myConsumer.setStartFromLatest();  // start from the latest offset

        // Consume the data from Kafka
        DataStreamSource<String> dataStream = env.addSource(myConsumer);
        //dataStream.print();   // {"id":"226","name":"tang tang - 226","sal":280751,"dept":"美女部","ts":1615191802523}

        DataStream<Employees> result = dataStream.map(new MapFunction<String, Employees>() {

            @Override
            public Employees map(String s) throws Exception {
                Employees emp = MyJsonUtils.str2JsonObj(s);
                emp.setEmpStartTime(new Date(emp.getTs()));
                emp.setDt(MyDateUtils.getDate2Second(emp.getEmpStartTime()));
                return emp;
            }
        });

        result.print();
        // Employees(eId=257, eName=fei fei - 257, eSal=97674.0, eDept=美女部, ts=1615251002894, empStartTime=Tue Mar 09 08:50:02 GMT+08:00 2021, dt=2021-03-09)

        // 2. Custom sink writing to HBase
        result.addSink(new MyHBaseSinkFunction());

        env.execute("wo xi huan ni");

    }

}
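
The Employees entity imported above is not included in the post. A minimal sketch that matches the getters and setters the job uses and the sample output printed above; Lombok @Data and the fastjson field mapping are assumptions, not something the original confirms:

package com.nfdw.entity;

import com.alibaba.fastjson.annotation.JSONField;
import lombok.Data;
import java.util.Date;

@Data
public class Employees {

    // Kafka JSON keys (id, name, sal, dept, ts) mapped onto the field names used in the job
    @JSONField(name = "id")
    private String eId;
    @JSONField(name = "name")
    private String eName;
    @JSONField(name = "sal")
    private Double eSal;
    @JSONField(name = "dept")
    private String eDept;
    private Long ts;

    // Derived in the map() above, not present in the Kafka message
    private Date empStartTime;
    private String dt;          // e.g. 2021-03-09
}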

1.2 Obtaining the execution environment

package com.nfdw.utils;

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class GetStreamExecutionEnvironment {

    public static StreamExecutionEnvironment getEnv(){
        // Get the Flink runtime environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpoint configuration
        // Trigger a checkpoint every 5000 ms
        env.enableCheckpointing(5000);
        env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        // Set the checkpointing mode to exactly-once
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // A checkpoint must complete within one minute or it is discarded
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        // Ensure at least 500 ms of pause between checkpoints
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // Allow only one checkpoint at a time
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Retain externalized checkpoints when the job is cancelled
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // State backend
        //env.setStateBackend(new FsStateBackend("hdfs://10.122.1.112:40010/flink/checkpoints", false));

        // Parallelism
        env.setParallelism(3);

        return env;
    }
}
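
MyJsonUtils and MyDateUtils, imported in 1.1 and used again by the sink in 1.3, are also not shown in the original. A minimal sketch; fastjson and the two date patterns are assumptions inferred from the sample record above (dt=2021-03-09):

package com.nfdw.utils;

import com.alibaba.fastjson.JSON;
import com.nfdw.entity.Employees;

public class MyJsonUtils {

    // Parse one Kafka message into an Employees object (assuming fastjson; any JSON mapper works the same way)
    public static Employees str2JsonObj(String json) {
        return JSON.parseObject(json, Employees.class);
    }
}

package com.nfdw.utils;

import java.text.SimpleDateFormat;
import java.util.Date;

public class MyDateUtils {

    // Day-level string used for the dt field, e.g. 2021-03-09 (pattern assumed)
    public static String getDate2Second(Date date) {
        return new SimpleDateFormat("yyyy-MM-dd").format(date);
    }

    // Full timestamp string written to HBase for empStartTime (pattern assumed)
    public static String getDate2Str(Date date) {
        return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(date);
    }
}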

1.3 Custom sink writing to HBase

package com.nfdw.sink;

import com.nfdw.entity.Employees;
import com.nfdw.utils.MyDateUtils;
import com.nfdw.utils.SnowflakeIdUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;

public class MyHBaseSinkFunction extends RichSinkFunction<Employees> {

    private transient Connection conn = null;
    private transient Table table = null;

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);

        org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
        // Connect to the ZooKeeper quorum used by HBase
        conf.set("hbase.zookeeper.quorum", "10.122.1.112");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        if (null == conn) {
            this.conn = ConnectionFactory.createConnection(conf);
        }
    }

    @Override
    public void invoke(Employees value, Context context) throws Exception {

        // Table name
        TableName tableName = TableName.valueOf("employees");
        // Get the table object
        table = conn.getTable(tableName);

        // Generate the rowkey
        String pkId = String.valueOf(SnowflakeIdUtil.getdidId(SnowflakeIdUtil.DCD_SNOWFLAKE)); // msg.getPkId()
        byte[] originKey = Bytes.toBytes(pkId);
        // Keep only the first 4 characters of the MD5 hash as a prefix so the rowkey does not get too long
        String md5AsHex = MD5Hash.getMD5AsHex(originKey).substring(0,4);
        String rowkey = md5AsHex + pkId;

        Put put = new Put(Bytes.toBytes(rowkey));
        // Column family, qualifier, value
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("eId"), Bytes.toBytes(String.valueOf(value.getEId())));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("eName"), Bytes.toBytes(value.getEName()));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("eSal"), Bytes.toBytes(value.getESal()));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("eDept"), Bytes.toBytes(value.getEDept()));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("ts"), Bytes.toBytes(value.getTs()));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("empStartTime"), Bytes.toBytes(MyDateUtils.getDate2Str(value.getEmpStartTime())));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("dt"), Bytes.toBytes(value.getDt()));
        table.put(put);

    }

    @Override
    public void close() throws Exception {
        super.close();

        if (table != null){
            table.close();
        }

        if (conn != null){
            conn.close();
        }

    }
}
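
Because invoke() runs once per record, looking the Table up there adds avoidable per-record work. A variant of open() (not from the original) that acquires the table once per parallel instance; invoke() then only builds the Put, and close() stays unchanged:

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "10.122.1.112");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        conn = ConnectionFactory.createConnection(conf);
        // Acquire the Table once per parallel instance instead of once per record
        table = conn.getTable(TableName.valueOf("employees"));
    }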

1.4 Snowflake utility class for generating rowkeys

package com.nfdw.utils;

import xyz.downgoon.snowflake.Snowflake;

public class SnowflakeIdUtil {

    /** group id / worker id for the Snowflake generator */
    public static long groupId = Long.parseLong("6");
    public static long workId = Long.parseLong("10");

    public static Snowflake DCD_SNOWFLAKE = new Snowflake(groupId, workId);

    /**
     * Returns the next generated id
     * @return the snowflake id
     */
    public static long getdidId(Snowflake snowflake){
        return snowflake.nextId();
    }

}

1.5 Verifying the data

  • First create the corresponding HBase table and column family in the HBase shell:
create 'employees','cf'
  • Run the job and check the result with scan 'employees', or verify programmatically as sketched below.
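
For a quick programmatic check of what the sink wrote, a sketch using the plain HBase client with the same quorum settings as the sink; the class name ScanEmployees is only for illustration:

package com.nfdw.utils;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanEmployees {

    public static void main(String[] args) throws Exception {
        org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "10.122.1.112");
        conf.set("hbase.zookeeper.property.clientPort", "2181");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("employees"));
             ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result row : scanner) {
                for (Cell cell : row.rawCells()) {
                    // ts and eSal were written as raw long/double bytes by the sink,
                    // so Bytes.toString will not render them as readable text.
                    System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + "  "
                            + Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
                            + Bytes.toString(CellUtil.cloneValue(cell)));
                }
            }
        }
    }
}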

2、Implementing the OutputFormat interface

2.1 Custom HBaseOutputFormat


package cn.swordfall.hbaseOnFlink

import org.apache.flink.api.common.io.OutputFormat
import org.apache.flink.configuration.Configuration
import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants, TableName}
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.util.Bytes

/**
  * @Author: Yang JianQiu
  * @Date: 2019/3/1 1:40
  *
  * Two ways of writing to HBase are provided;
  * this is the second: implementing the OutputFormat interface
  */
class HBaseOutputFormat extends OutputFormat[String]{

  val zkServer = "192.168.187.201"
  val port = "2181"
  var conn: Connection = null
  var mutator: BufferedMutator = null
  var count = 0

  /**
    * Configures the output format. This method is always the first one called on an instantiated output format.
    *
    * @param configuration
    */
  override def configure(configuration: Configuration): Unit = {

  }

  /**
    * Opens a parallel instance of the output format; the HBase connection, configuration and table setup are done here.
    *
    * @param i  index of this parallel subtask
    * @param i1 total number of parallel subtasks
    */
  override def open(i: Int, i1: Int): Unit = {
    val config: org.apache.hadoop.conf.Configuration = HBaseConfiguration.create
    config.set(HConstants.ZOOKEEPER_QUORUM, zkServer)
    config.set(HConstants.ZOOKEEPER_CLIENT_PORT, port)
    config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 30000)
    config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 30000)
    conn = ConnectionFactory.createConnection(config)

    val tableName: TableName = TableName.valueOf("test")

    val params: BufferedMutatorParams = new BufferedMutatorParams(tableName)
    // Set a 1 MB write buffer; once it fills up, buffered mutations are flushed to HBase automatically
    params.writeBufferSize(1024 * 1024)
    mutator = conn.getBufferedMutator(params)
    count = 0
  }

  /**
    * Writes a single record to the sink; this is where the HBase write API is called
    *
    * @param it
    */
  override def writeRecord(it: String): Unit = {

    val cf1 = "cf1"
    val array: Array[String] = it.split(",")
    val put: Put = new Put(Bytes.toBytes(array(0)))
    put.addColumn(Bytes.toBytes(cf1), Bytes.toBytes("name"), Bytes.toBytes(array(1)))
    put.addColumn(Bytes.toBytes(cf1), Bytes.toBytes("age"), Bytes.toBytes(array(2)))
    mutator.mutate(put)
    // Flush every 4 records. If this OutputFormat is used from a batch job, the threshold here
    // must not exceed the total number of records in the batch, otherwise the tail of the data
    // is never flushed to HBase (close() below also flushes whatever is still buffered).
    count = count + 1
    if (count >= 4){
      mutator.flush()
      count = 0
    }
  }

  /**
    * Flushes any remaining buffered mutations and releases HBase resources
    */
  override def close(): Unit = {
    try {
      if (mutator != null) mutator.close()   // BufferedMutator.close() flushes before releasing
      if (conn != null) conn.close()
    } catch {
      case e: Exception => println(e.getMessage)
    }
  }
}

2.2 Flink Streaming (stream processing)

/**
  * Write to HBase
  * Second approach: implementing the OutputFormat interface
  */
 def write2HBaseWithOutputFormat(): Unit = {
   val topic = "test"
   val props = new Properties
   props.put("bootstrap.servers", "192.168.187.201:9092")
   props.put("group.id", "kv_flink")
   props.put("enable.auto.commit", "true")
   props.put("auto.commit.interval.ms", "1000")
   props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
   props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
   val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
   env.enableCheckpointing(5000)
   env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
   env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
   val myConsumer = new FlinkKafkaConsumer[String](topic, new SimpleStringSchema, props)

   val dataStream: DataStream[String] = env.addSource(myConsumer)
   
   dataStream.writeUsingOutputFormat(new HBaseOutputFormat)
   
   env.execute()
 }

2.3 Flink DataSet (batch processing)

/**
  * Write to HBase
  * Second approach: implementing the OutputFormat interface, batch (DataSet) version
  */
 def write2HBaseWithOutputFormat(): Unit = {
   // Batch jobs use ExecutionEnvironment / DataSet instead of the streaming API
   // (requires import org.apache.flink.api.scala._ for the implicits).
   val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment

   // Sample records in the "id,name,age" layout expected by HBaseOutputFormat.writeRecord;
   // the values here are just for illustration.
   val dataSet: DataSet[String] = env.fromElements("103,zhangsan,20", "104,lisi,21", "105,wangwu,22")

   // output() hands every record of the DataSet to the same HBaseOutputFormat as above
   dataSet.output(new HBaseOutputFormat)

   env.execute()
 }