Kafka consume-transform-produce: a transactional consistency example

The program below polls records with a KafkaConsumer, forwards them to another topic with a transactional KafkaProducer, and commits the consumed offsets through the producer inside the same transaction, so consumption and production either both take effect or neither does.

package zktest.zktest;
 
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
 
public class ConsumerTranProducerTest {
 
    public static void main(String[] args) {
		// The consumer group id is reused when committing offsets through the producer,
		// so keep it in one place.
		String groupId = "wwaaadd1fw";

		Properties properties = new Properties();
		properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "*****");
		properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
		properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		// Offsets are committed through the producer transaction, never by the consumer.
		properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
		properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		// Only read messages from committed transactions.
		properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
		properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");

		// SASL/PLAIN authentication.
		properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
		properties.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
		properties.put(SaslConfigs.SASL_JAAS_CONFIG,
				"org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"kafka\";");



		Properties props = new Properties();

		props.put("bootstrap.servers", "10.28.8.60:17002");
		props.put("acks", "all");
		props.put("retries", 2);
		props.put("batch.size", 16384);
		props.put("linger.ms", 10);
		props.put("buffer.memory", 33554432);
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		// Custom partitioner (optional); a sketch of this class is given after the listing.
		props.put("partitioner.class", "zktest.zktest.SimplePartitioner");
		// A transactional.id is required for transactions; it must be unique per
		// producer instance so the broker can fence zombie producers.
		props.put("transactional.id", "transactional-id");
		props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
		props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
		props.put(SaslConfigs.SASL_JAAS_CONFIG,
				"org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"kafka\";");
		props.put("enable.idempotence", "true");

        // Initialize the producer and the consumer.
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList("topic-name18"));
 
 
        // Initialize transactions: registers the transactional.id with the transaction
        // coordinator and fences any previous producer instance using the same id.
        producer.initTransactions();

        while (true){
            ConsumerRecords<String,String> consumerRecords = consumer.poll(Duration.ofMillis(5000));
            if(!consumerRecords.isEmpty()){
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                // Begin the transaction.
                producer.beginTransaction();
                try {
                    for(TopicPartition partition : consumerRecords.partitions()){
                        List<ConsumerRecord<String,String>> partitionRecords = consumerRecords.records(partition);
                        for(ConsumerRecord<String,String> record : partitionRecords){
                            // Do some processing, then forward the record (consume-transform-produce).
                            ProducerRecord<String, String> producerRecord = new ProducerRecord<>("topic-name17", record.key(), record.value());
                            System.out.println("consumed partition: " + record.partition() + ", offset: " + record.offset() + ", key: " + record.key() + ", topic: " + record.topic());
                            producer.send(producerRecord, new Callback() {
                                public void onCompletion(RecordMetadata metadata, Exception e) {
                                    if (e != null) {
                                        e.printStackTrace();
                                    } else {
                                        System.out.println("sent to topic " + metadata.topic() + ", partition " + metadata.partition() + ", offset " + metadata.offset());
                                    }
                                }
                            });
                        }
                        // The position to commit is the last consumed offset + 1.
                        long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                        offsets.put(partition, new OffsetAndMetadata(lastConsumedOffset + 1));
                    }
                    // Commit the consumed offsets as part of the transaction. The group id
                    // must match the consumer's group.id, or the offsets are committed for
                    // the wrong group.
                    producer.sendOffsetsToTransaction(offsets, groupId);
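                    // Assumption: on Kafka clients 2.5+ (KIP-447), the overload that takes
                    // the consumer's group metadata is preferred, since it stays correct
                    // across consumer-group rebalances:
                    //   producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());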
                
                    // Commit the transaction: the forwarded records and the offset commit
                    // become visible atomically.
                    producer.commitTransaction();
                } catch (ProducerFencedException e) {
                    // A producer with the same transactional.id took over; stop this instance.
                    producer.close();
                    break;
                } catch (KafkaException e) {
                    // Abort on any other error; the offsets were not committed, so the
                    // records will be re-polled and reprocessed.
                    producer.abortTransaction();
                }
            }
        }
    }
}
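
The producer config above references zktest.zktest.SimplePartitioner, which is not shown in the post. A minimal sketch of what such a partitioner could look like, assuming it simply hashes the record key onto the topic's partitions (an illustration, not the author's actual class):

package zktest.zktest;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Arrays;
import java.util.Map;

public class SimplePartitioner implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if (keyBytes == null) {
            // No key: send everything to partition 0 to keep the example deterministic.
            return 0;
        }
        // Mask the sign bit so the result is non-negative, then spread by key hash.
        return (Arrays.hashCode(keyBytes) & Integer.MAX_VALUE) % numPartitions;
    }

    @Override
    public void close() {}

    @Override
    public void configure(Map<String, ?> configs) {}
}

Hashing on the key keeps records with the same key in the same partition, so per-key ordering is preserved through the forwarding step.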