Kafka: Transaction Control (producer writes to topic02 → middleware reads from topic02, processes, and writes to topic01 → consumer reads from topic01)

Required Maven dependencies:

    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <version>2.2.0</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/log4j/log4j -->
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
      <version>1.2.17</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>1.7.25</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
      <version>1.7.25</version>
    </dependency>
    <dependency>
      <groupId>org.apache.commons</groupId>
      <artifactId>commons-lang3</artifactId>
      <version>3.8.1</version>
    </dependency>
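
The log4j and slf4j-log4j12 dependencies expect a log4j.properties file on the classpath (typically src/main/resources); without one the Kafka client only prints a "no appenders" warning and its logs are lost. A minimal sketch for console logging (the INFO level and the pattern layout are assumptions, not part of the original setup):

# log4j.properties - console logging for the Kafka client (assumed configuration)
log4j.rootLogger=INFO, console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%p %d{yyyy-MM-dd HH:mm:ss} %c - %m%n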

The producer writes data to topic02:

package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.UUID;
public class ProductKafkaTransactionnOnly {
    public static void main(String[] args) {
        //Create the producer
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        //Tuning parameters
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 1024 * 1024);//the producer buffers records, up to 1 MB per partition
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 500);//wait at most 0.5 s before sending a batch

        //Idempotence requires acks=-1 (all in-sync replicas must acknowledge)
        properties.put(ProducerConfig.ACKS_CONFIG,"-1");
        //Maximum time to wait for a request before it times out
        properties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,5000);
        //Number of retries on failure
        properties.put(ProducerConfig.RETRIES_CONFIG,3);
        //Enable idempotence for exactly-once writes
        properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);

        //Enable transactions by assigning a transactional.id
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id"+ UUID.randomUUID());

        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<String, String>(properties);
        //Initialize the transaction
        kafkaProducer.initTransactions();

        try {
            //Begin the transaction
            kafkaProducer.beginTransaction();
            for (int i=0;i<5;i++){
                ProducerRecord<String, String> record = new ProducerRecord<>(
                        "topic02",
                        "Transaction",
                        "Test committed  Transaction1");
                kafkaProducer.send(record);
                kafkaProducer.flush();
                if (i==3){
                    //Integer b=i/0;//uncomment to force an error and test the abort path
                }
            }
            //Commit the transaction
            kafkaProducer.commitTransaction();
        } catch (ProducerFencedException e) {
            //Fatal: another producer with the same transactional.id fenced this one; the transaction cannot be aborted, only closed
            e.printStackTrace();
        } catch (KafkaException e) {
            //Abort the transaction on any other failure
            kafkaProducer.abortTransaction();
            e.printStackTrace();
        }

        kafkaProducer.close();
    }
}
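
All three programs assume topic01 and topic02 already exist on the broker. If they do not, a minimal sketch using the AdminClient API can create them (the class name, partition count, and replication factor below are assumptions; adjust them to your cluster):

package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import java.util.Arrays;
import java.util.Properties;
public class CreateTopics {
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        try (AdminClient adminClient = AdminClient.create(properties)) {
            //3 partitions and replication factor 1 are assumptions for a single-broker setup
            adminClient.createTopics(Arrays.asList(
                    new NewTopic("topic01", 3, (short) 1),
                    new NewTopic("topic02", 3, (short) 1)
            )).all().get();
        }
    }
}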

The middleware reads data from topic02 and then writes it to topic01:

package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import java.time.Duration;
import java.util.*;
public class ProductConsumerKafkaTransactionn {
    public static void main(String[] args) {
        String groupId = "g1";
        KafkaProducer<String, String> kafkaProducer = buildKafkaProducer();
        KafkaConsumer<String, String> kafkaConsumer = buildKafkaConsummer(groupId);//the consumer must use manual offset commits
        //Read data from topic02
        kafkaConsumer.subscribe(Arrays.asList("topic02"));
        //Initialize the transaction
        kafkaProducer.initTransactions();
        //Offsets are committed only when the send succeeds, so the consumer's auto-commit is disabled
        try {
            while (true){
                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(1));
                if (!consumerRecords.isEmpty()){
                    //Begin the transaction
                    kafkaProducer.beginTransaction();
                    Iterator<ConsumerRecord<String, String>> iterator = consumerRecords.iterator();

                    Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();

                    while (iterator.hasNext()){
                        ConsumerRecord<String, String> record = iterator.next();
                        String key = record.key();
                        String value = record.value();

                        //The committed offset is the last processed offset + 1: it names the next record to read
                        offset.put(new TopicPartition(record.topic(),record.partition()),new OffsetAndMetadata(record.offset()+1));
                        ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>("topic01",key,value+"jiangsiyu");
                        kafkaProducer.send(producerRecord);
                    }
                    kafkaProducer.flush();
                    //Commit the consumed offsets and the produced records in one atomic transaction
                    kafkaProducer.sendOffsetsToTransaction(offset,groupId);
                    kafkaProducer.commitTransaction();
                }
            }
        } catch (Exception e) {
            //Abort the transaction on failure
            kafkaProducer.abortTransaction();
        } finally {
            kafkaProducer.close();
            kafkaConsumer.close();
        }
    }
    //Static method that builds the Kafka producer
    public static KafkaProducer<String,String> buildKafkaProducer(){
        //Create the producer
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        //Tuning parameters
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 1024 * 1024);//the producer buffers records, up to 1 MB per partition
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 500);//wait at most 0.5 s before sending a batch

        //Idempotence requires acks=-1 (all in-sync replicas must acknowledge)
        properties.put(ProducerConfig.ACKS_CONFIG,"-1");
        //Maximum time to wait for a request before it times out
        properties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,5000);
        //Number of retries on failure
        properties.put(ProducerConfig.RETRIES_CONFIG,3);
        //Enable idempotence for exactly-once writes
        properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);

        //Enable transactions by assigning a transactional.id
        //Note: a random transactional.id means a restarted instance cannot fence its predecessor; a fixed id per logical producer is the usual production choice
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id"+ UUID.randomUUID());
        return new KafkaProducer<String, String>(properties);
    }
    //Static method that builds the Kafka consumer
    public static KafkaConsumer<String,String> buildKafkaConsummer(String groupId){
        //Create the consumer
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"Centos:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,groupId);

        //Offsets are committed only when the send succeeds, so auto-commit must be off
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);
        //Transaction isolation level: records of uncommitted transactions are not visible
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");
        return new KafkaConsumer<String, String>(properties);
    }
}
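
The key point in this pattern is that the consumer never commits offsets itself: sendOffsetsToTransaction publishes them through the producer, so the consumed offsets for topic02 and the records written to topic01 commit or abort together. For comparison only, the non-transactional equivalent would commit the same offset map directly from the consumer (a sketch, not part of the program above):

//Non-transactional equivalent: commit offsets directly from the consumer.
//With this approach the offset commit and the producer's writes are NOT atomic,
//so a crash between the two can cause duplicates or data loss.
kafkaConsumer.commitSync(offset);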

The consumer reads data from topic01:

package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Properties;

public class ConsumerKafkaReadCommitted {
    public static void main(String[] args) {
        //Create the consumer
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"Centos:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"g1");

        //Transaction isolation level: records of uncommitted transactions are not visible
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(properties);
        kafkaConsumer.subscribe(Arrays.asList("topic01"));
        try {
            while (true){
                //Poll for new records, waiting up to one second
                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(1));
                //Skip empty poll results
                if(!consumerRecords.isEmpty()){
                    Iterator<ConsumerRecord<String, String>> iterator = consumerRecords.iterator();
                    while (iterator.hasNext()){
                        ConsumerRecord<String, String> next = iterator.next();
                        String topic = next.topic();
                        String key = next.key();
                        String value = next.value();
                        long offset = next.offset();
                        int partition = next.partition();
                        long timestamp = next.timestamp();
                        System.out.println("key = " + key+"\t"+"offset = " + offset+"\t"+"value = " + value+"\t"+"partition = " + partition+"\t"+"timestamp = " + timestamp+"\t"+"topic = " + topic);
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }finally {
            kafkaConsumer.close();
        }

    }
}
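
For contrast, pointing the same consumer at topic01 with isolation.level set to read_uncommitted (the default) also delivers records from aborted or still-open transactions, which makes it easy to observe exactly what read_committed filters out. A minimal sketch, identical to the consumer above except for the isolation level and a separate group id (the class name and group g2 are assumptions):

package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
public class ConsumerKafkaReadUncommitted {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "g2");//separate group so offsets do not collide with g1
        //read_uncommitted also returns records from aborted or still-open transactions
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties)) {
            kafkaConsumer.subscribe(Arrays.asList("topic01"));
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("key = " + record.key() + "\tvalue = " + record.value() + "\toffset = " + record.offset());
                }
            }
        }
    }
}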