Opengauss同步数据至kafka实例

在这里插入图片描述

配置

postgresql.conf

必须修改 wal_level = logical

酌情根据实际情况修改 wal_sender_timeout、max_wal_senders、max_replication_slots

pg_hba.conf

允许用户 replication 的访问,以及针对 ip 的访问限制,例如:host replication jacky 0.0.0.0/0 md5

docker 模拟kafka集群

version: '3'
services:
  # Single-node ZooKeeper instance; Kafka uses it for cluster metadata.
  zookeeper:
    image: wurstmeister/zookeeper
    environment:
      DC_NAME: qnib
    ports:
      - "2181:2181"
    privileged: true

 
  # Web UI for browsing the ZooKeeper tree (http://localhost:9090).
  zkui:
    image: maauso/zkui
    # dns: 127.0.0.1
    depends_on:
      - zookeeper
    ports:
      - "9090:9090"
    environment:
      ZKLIST: zookeeper:2181
    privileged: true

  # Single Kafka broker with two listeners:
  #   INTERNAL (9092) — for containers on the compose network (advertised as kafka:9092)
  #   OUTSIDE  (9094) — for the host machine (advertised as localhost:9094);
  # the Java example below connects via 127.0.0.1:9094.
  kafka:
    image: wurstmeister/kafka
    depends_on: [ zookeeper ]
    ports:
      - "9092:9092"
      - "9094:9094"
    environment:
      # KAFKA_ADVERTISED_HOST_NAME: 0.0.0.0
      # KAFKA_LISTENERS: PLAINTEXT://kafka:9092
      # KAFKA_CREATE_TOPICS: "test:1:1"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: INTERNAL://0.0.0.0:9092,OUTSIDE://0.0.0.0:9094
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://localhost:9094
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL

  
  # Web console for inspecting the Kafka cluster (http://localhost:9000).
  kafka-manager:
    image: sheepkiller/kafka-manager
    depends_on: [ zookeeper,kafka ]
    ports:
      - "9000:9000"
    environment:
      ZK_HOSTS: zookeeper:2181

代码实例

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.sql.DriverManager;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.opengauss.PGProperty;
import org.opengauss.jdbc.PgConnection;
import org.opengauss.replication.LogSequenceNumber;
import org.opengauss.replication.PGReplicationStream;




/**
 * Streams logical-replication changes from an openGauss database and forwards
 * each decoded change record to a Kafka topic.
 *
 * <p>Server-side prerequisites: {@code wal_level = logical} in postgresql.conf
 * and a pg_hba.conf entry permitting replication connections for the user.
 */
public class App {

    /** JDBC URL of the source openGauss instance. */
    public static final String SOURCEURL = "jdbc:opengauss://127.0.0.1:5432/t1";
    public static final String USER = "tj";
    public static final String PASSWD = "Dafei1288@";

    /** Destination Kafka topic for decoded replication records. */
    public static final String TOPIC = "pg_test";

    /** Kafka bootstrap address — 9094 is the OUTSIDE listener in the compose file. */
    public static final String BROKERS_ADDRESS = "127.0.0.1:9094";
//    public static final int REQUEST_REQUIRED_ACKS = 1;
//    public static final String CLIENT_ID = "producer_test_id";

    /**
     * Entry point: connects to openGauss in replication mode, opens a logical
     * replication stream, and pumps every decoded change into Kafka forever.
     *
     * @param args unused
     * @throws Exception on driver-load, connection, replication, or send failure
     */
    public static void main(String[] args) throws Exception {
        Class.forName("org.opengauss.Driver");

        // try-with-resources guarantees the producer and JDBC connection are
        // released even if the (otherwise endless) pump loop exits via an exception.
        try (KafkaProducer<String, String> kafkaProducer = createProducer();
             PgConnection conn = openReplicationConnection()) {
            System.out.println("connection success!");
            PGReplicationStream stream = openStream(conn);
            pump(stream, kafkaProducer);
        }
    }

    /** Builds a String/String producer pointed at {@link #BROKERS_ADDRESS}. */
    private static KafkaProducer<String, String> createProducer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERS_ADDRESS);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
//        props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(REQUEST_REQUIRED_ACKS));
//        props.put(ProducerConfig.CLIENT_ID_CONFIG, CLIENT_ID);
        // Diamond operator removes the raw-type warning from the original.
        return new KafkaProducer<>(props);
    }

    /** Opens a JDBC connection configured for logical replication. */
    private static PgConnection openReplicationConnection() throws Exception {
        Properties properties = new Properties();
        PGProperty.USER.set(properties, USER);
        PGProperty.PASSWORD.set(properties, PASSWD);
        PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, "9.4");
        // "database" enables logical replication on this connection.
        PGProperty.REPLICATION.set(properties, "database");
        PGProperty.PREFER_QUERY_MODE.set(properties, "simple");
        return (PgConnection) DriverManager.getConnection(SOURCEURL, properties);
    }

    /** Starts a logical replication stream on the pre-created slot. */
    private static PGReplicationStream openStream(PgConnection conn) throws Exception {
        String slotName = "replication_slot";
        // NOTE(review): hard-coded start LSN — must match an existing position
        // on the server; confirm against pg_replication_slots before use.
        LogSequenceNumber waitLSN = LogSequenceNumber.valueOf("22DBF70");

        return conn
                .getReplicationAPI()
                .replicationStream()
                .logical()
                .withSlotName(slotName)
                .withSlotOption("include-xids", false)
                .withSlotOption("skip-empty-xacts", true)
                .withStartPosition(waitLSN)
//                        .withSlotOption("parallel-decode-num", 10) // decoder thread parallelism
//                        .withSlotOption("white-table-list", "public.logic_test") // table whitelist
//                        .withSlotOption("standby-connection", true) // force standby decoding
//                        .withSlotOption("decode-style", "t") // decode format
//                        .withSlotOption("sending-batch", 1) // send decoded results in batches
                .start();
    }

    /** Endless loop: read pending replication data and publish it to Kafka. */
    private static void pump(PGReplicationStream stream,
                             KafkaProducer<String, String> producer) throws Exception {
        while (true) {
            ByteBuffer byteBuffer = stream.readPending();

            if (byteBuffer == null) {
                // Nothing pending — back off briefly instead of busy-spinning.
                TimeUnit.MILLISECONDS.sleep(10L);
                continue;
            }

            // The buffer is array-backed; decode only its valid region.
            // Explicit UTF-8 avoids the platform-default charset the original used.
            int offset = byteBuffer.arrayOffset();
            byte[] source = byteBuffer.array();
            String res = new String(source, offset, source.length - offset, StandardCharsets.UTF_8);

            producer.send(new ProducerRecord<>(TOPIC, res));
            System.out.println("send ok ==> " + res);

            // If the business logic requires flushing the LSN, use the calls below.
            LogSequenceNumber lastRecv = stream.getLastReceiveLSN();
            System.out.println(lastRecv);
//                    stream.setFlushedLSN(lastRecv);
//                    stream.forceUpdateStatus();
        }
    }
}

  • 28
    点赞
  • 20
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 20
    评论
评论 20
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

麒思妙想

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值