kafka自带事务机制
事务场景是有规定的,只有这个场景下才能使用事务
数据采集到kafka之后,简单的业务场景,又写回到kafka
- 创建kafka生产者
- 创建kafka消费者
- properties.put ("transactional.id","transactionid00001"); 指定事务id
- properties.put ("enable.idempotence",true); 开启幂等性
- initTransactions 初始化事务
- beginTransaction 开启事务
- consumer.poll
- 业务
- producer.send
- producer提交offset
- commitTransaction 提交事务
- abortTransaction 如果异常回滚事务
package com.ws;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.*;
/**
 * Transactional consume-transform-produce loop (Kafka exactly-once pattern):
 * reads from {@code transaction_topic}, upper-cases each value, writes to
 * {@code transaction_topic_new}, and commits the consumed offsets inside the
 * same producer transaction so records and offsets are applied atomically.
 */
public class TransactionTest {

    private static final String BOOTSTRAP_SERVER = "dream1:9092,dream2:9092,dream3:9092";
    // Producer transactional.id — must be unique per logical producer; the broker
    // uses it to fence zombie instances after a restart.
    private static final String TRANSACTION_GROUP = "transactional_01";
    // Consumer group id — this (not the transactional id) is what
    // sendOffsetsToTransaction must receive.
    private static final String CONSUMER_GROUP = "transaction_g01";

    public static void main(String[] args) {
        // ---- consumer configuration ----
        Map<String, Object> consumerProps = new HashMap<>();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVER);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, CONSUMER_GROUP);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Offsets are committed only through the producer transaction — the
        // default auto-commit would commit them outside it and break atomicity.
        consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Do not consume records from aborted or still-open transactions.
        consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

        // ---- producer configuration ----
        Map<String, Object> producerProps = new HashMap<>();
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVER);
        producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
        producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Idempotence is required by (and implied by) the transactional producer.
        producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        // Setting a transactional.id enables initTransactions()/beginTransaction().
        producerProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, TRANSACTION_GROUP);

        KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
        try {
            consumer.subscribe(Collections.singletonList("transaction_topic"));
            // Register with the transaction coordinator; fences any older producer
            // instance that used the same transactional.id.
            producer.initTransactions();

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                if (records.isEmpty()) {
                    // Nothing fetched — don't open (and then commit) an empty transaction.
                    continue;
                }
                producer.beginTransaction();
                try {
                    // Offsets to commit atomically with the produced records.
                    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                    // records.partitions() yields exactly the partitions that returned
                    // data in this poll (consumer.assignment() may include empty ones).
                    for (TopicPartition partition : records.partitions()) {
                        List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                        for (ConsumerRecord<String, String> record : partitionRecords) {
                            // Business step: upper-case the value.
                            String transformed = record.value().toUpperCase();
                            producer.send(new ProducerRecord<>("transaction_topic_new", record.key(), transformed));
                        }
                        // Commit the position of the NEXT record to read: last offset + 1.
                        long nextOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
                        offsets.put(partition, new OffsetAndMetadata(nextOffset));
                    }
                    // Commit consumed offsets inside the transaction. This argument must
                    // be the CONSUMER group id, not the producer's transactional id —
                    // otherwise offsets land in the wrong group and restarts re-read
                    // (and re-emit) everything.
                    producer.sendOffsetsToTransaction(offsets, CONSUMER_GROUP);
                    producer.commitTransaction();
                } catch (ProducerFencedException e) {
                    // A newer producer with the same transactional.id took over. A fenced
                    // producer can no longer abort or commit — it must shut down.
                    e.printStackTrace();
                    break;
                } catch (KafkaException e) {
                    // Any other send/commit failure: discard this transaction's records
                    // and offsets, then retry from the last committed position.
                    e.printStackTrace();
                    producer.abortTransaction();
                }
            }
        } finally {
            producer.close();
            consumer.close();
        }
    }
}