Import the dependencies
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.2.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/log4j/log4j -->
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.25</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.25</version>
</dependency>
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-lang3</artifactId>
    <version>3.8.1</version>
</dependency>
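The slf4j-log4j12 binding only produces output if a log4j configuration is on the classpath; without one the clients print a "log4j:WARN No appenders could be found" warning. A minimal log4j.properties (assumed here to be placed in src/main/resources) could look like this:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c - %m%n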
Transaction control
First, configure the transaction-related settings on both sides.
The producer must enable transactions:
//enable transactions by assigning a transactional.id
properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id"+ UUID.randomUUID());
The consumer must set the transaction isolation level:
//isolation level: read only data from committed transactions
properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");
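With those two settings in place, the producer side always follows the same call sequence. A minimal sketch, reusing a Properties object configured as in the full producer class below (error handling is simplified here and discussed after that class):
KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
producer.initTransactions();          //register the transactional.id with the transaction coordinator
try {
    producer.beginTransaction();      //start a transaction
    producer.send(new ProducerRecord<>("topic01", "key", "value"));
    producer.commitTransaction();     //records become visible to read_committed consumers
} catch (Exception e) {
    producer.abortTransaction();      //records stay in the log but read_committed consumers skip them
} finally {
    producer.close();
}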
Observed results
- The producer commits its data successfully and its console shows no errors. A consumer with isolation level read_committed reads all five records. (Screenshots: producer logic; consumer console showing all five records.)
- The producer fails before committing and its console reports an error. A consumer with isolation level read_committed can only read successfully committed data, so it reads nothing at all. (Screenshots: producer logic; producer console error; consumer receiving no data.)
- The producer fails before committing and its console reports an error. A consumer with isolation level read_uncommitted can read uncommitted data; the error is thrown when i is 3, so the four records sent at i = 0, 1, 2, 3 are read. (Screenshots: producer code logic; producer console error; consumer console showing four records, ending at i = 3.) A sketch of this read_uncommitted consumer appears after the read_committed consumer class below.
With read_committed, only data the producer has successfully committed can be read.
With read_uncommitted, uncommitted data can also be read (the records sent before the error ended the run).
Full producer and consumer code
Producer only
package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.UUID;
public class ProductKafkaTransactionnOnly {
    public static void main(String[] args) {
        //create the producer
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        //tuning parameters
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 1024 * 1024);//buffer up to 1 MB of records per partition batch
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 500);//wait at most 0.5 s for a batch to fill
        //idempotence requires acks = -1 (all)
        properties.put(ProducerConfig.ACKS_CONFIG, "-1");
        //maximum time to wait for a broker response
        properties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 5000);
        //number of retries on failure
        properties.put(ProducerConfig.RETRIES_CONFIG, 3);
        //enable idempotence for exactly-once writes
        properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        //enable transactions
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transaction-id" + UUID.randomUUID());
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<String, String>(properties);
        //initialize the transaction
        kafkaProducer.initTransactions();
        try {
            //begin the transaction
            kafkaProducer.beginTransaction();
            for (int i = 0; i < 5; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(
                        "topic01",
                        "Transaction",
                        "Test committed Transaction1");
                kafkaProducer.send(record);
                kafkaProducer.flush();
                if (i == 3) {
                    //simulate a failure: division by zero throws an ArithmeticException
                    Integer b = i / 0;
                }
            }
            //commit the transaction
            kafkaProducer.commitTransaction();
        } catch (ProducerFencedException e) {
            //fatal: another producer with the same transactional.id is active; the producer can only be closed
            e.printStackTrace();
        } catch (Exception e) {
            //any other failure (including the simulated ArithmeticException): abort the transaction
            kafkaProducer.abortTransaction();
            e.printStackTrace();
        }
        kafkaProducer.close();
    }
}
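The catch blocks above are kept simple so that the simulated ArithmeticException actually triggers an abort (the original single catch of ProducerFencedException would never see it). The KafkaProducer javadoc recommends a slightly different split: fencing, out-of-order-sequence and authorization errors are unrecoverable and the producer should only be closed, while any other KafkaException can be aborted and retried. A sketch of that pattern (the extra exception types live in org.apache.kafka.common.errors, KafkaException in org.apache.kafka.common):
try {
    kafkaProducer.beginTransaction();
    kafkaProducer.send(new ProducerRecord<>("topic01", "Transaction", "Test committed Transaction1"));
    kafkaProducer.commitTransaction();
} catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
    //unrecoverable: another producer with the same transactional.id took over, or authorization failed
    kafkaProducer.close();
} catch (KafkaException e) {
    //any other error: abort this transaction and, if desired, retry with a new one
    kafkaProducer.abortTransaction();
}
Note that fencing only works with a stable transactional.id; the random UUID used in the demo makes every run a brand-new transactional producer, which is fine for this experiment but gives up that protection.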
Consumer
package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Properties;
public class ConsumerKafkaReadCommitted {
    public static void main(String[] args) {
        //create the consumer
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group01");
        //transaction isolation level: records of uncommitted transactions are not returned
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(properties);
        kafkaConsumer.subscribe(Arrays.asList("topic01"));
        try {
            while (true) {
                //wait up to one second for new records on each poll
                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(1));
                //skip empty poll results
                if (!consumerRecords.isEmpty()) {
                    Iterator<ConsumerRecord<String, String>> iterator = consumerRecords.iterator();
                    while (iterator.hasNext()) {
                        ConsumerRecord<String, String> next = iterator.next();
                        String topic = next.topic();
                        String key = next.key();
                        String value = next.value();
                        long offset = next.offset();
                        int partition = next.partition();
                        long timestamp = next.timestamp();
                        System.out.println("key = " + key + "\t" + "offset = " + offset + "\t" + "value = " + value + "\t" + "partition = " + partition + "\t" + "timestamp = " + timestamp + "\t" + "topic = " + topic);
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            kafkaConsumer.close();
        }
    }
}
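For the third scenario in the results above, the only required change is the isolation level. A minimal sketch of that variant (the class name and the group id group02 are chosen here for illustration; everything else mirrors ConsumerKafkaReadCommitted):
package com.baizhi.jsy.transaction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
public class ConsumerKafkaReadUncommitted {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "Centos:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group02");//illustrative group id, kept separate from group01
        //read_uncommitted (the default) also returns records of aborted or still-open transactions
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_uncommitted");
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Arrays.asList("topic01"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("offset = " + record.offset() + "\t" + "value = " + record.value());
                }
            }
        } finally {
            kafkaConsumer.close();
        }
    }
}
Running this consumer after the failed producer run prints the four records sent at i = 0 through 3, matching the third result described above.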