Kafka Code Implementation Templates

Kafka Producer

Creating a Kafka producer

Properties properties = new Properties();
properties.put("bootstrap.servers", "localhost:9092");
properties.put("acks", "all");
properties.put("retries", 0);
properties.put("batch.size", 16384);
properties.put("linger.ms", 1);
properties.put("buffer.memory", 33554432);
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);
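
The raw string keys above are easy to mistype. An equivalent sketch using the ProducerConfig constants and try-with-resources (KafkaProducer implements Closeable); the broker address and topic are the same placeholders as above, and the org.apache.kafka.clients.producer imports are elided:

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// try-with-resources flushes and closes the producer automatically
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<>("test", "hello"));
}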

Fire-and-forget send (return value ignored)

KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);
for (int i = 0; i < 10; i++) {
    // send() returns a Future, but the result is ignored here
    kafkaProducer.send(new ProducerRecord<>("test", "message" + i));
}
kafkaProducer.close();

Synchronous send (blocking on the returned Future)

KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);
// ProducerRecord(topic, key, value)
ProducerRecord<String, String> syncRecord = new ProducerRecord<>("test20200519", "Kafka_Products", "test");
Future<RecordMetadata> future = kafkaProducer.send(syncRecord);
// get() blocks until the broker acknowledges the write (or throws on failure)
RecordMetadata metadata = future.get();
kafkaProducer.close();

Asynchronous send (with a callback)

KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);
ProducerRecord<String, String> asyncRecord = new ProducerRecord<>("test20200519", "Kafka_Products", "test--1"); // topic, key, value
// Pass a callback object when sending; it must implement org.apache.kafka.clients.producer.Callback
kafkaProducer.send(asyncRecord, new DemoProducerCallback());
kafkaProducer.close();


// Callback object
class DemoProducerCallback implements Callback {
    @Override
    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
        if (e != null) { // If Kafka returned an error, onCompletion receives a non-null exception
            e.printStackTrace(); // Handle the exception here; this example just prints it
        }
    }
}
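
Since Callback has a single abstract method, the same callback can also be written as a lambda on Java 8+; a minimal sketch reusing asyncRecord from above:

kafkaProducer.send(asyncRecord, (metadata, exception) -> {
    if (exception != null) {
        exception.printStackTrace();
    } else {
        System.out.printf("sent to %s-%d @ offset %d%n",
                metadata.topic(), metadata.partition(), metadata.offset());
    }
});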

Kafka Consumer

Creating a Kafka consumer

Properties properties = new Properties();
properties.put("bootstrap.servers", "localhost:9092");
properties.put("group.id", "test");
properties.put("enable.auto.commit", "true");
properties.put("auto.commit.interval.ms", "1000");
properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
properties.put("auto.offset.reset", "earliest");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);

Subscribing and polling for messages

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
consumer.subscribe(Arrays.asList("banana"));
try {
    while (true) {
        // Poll for messages
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
} finally {
    consumer.close();
}
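
The configuration above relies on auto-commit, which can re-deliver or lose messages around a crash. For at-least-once processing, a common variant disables auto-commit and commits manually after each batch; a sketch, assuming the same properties object (handleRecord is a hypothetical processing method):

properties.put("enable.auto.commit", "false"); // take over offset management
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
consumer.subscribe(Arrays.asList("banana"));
try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            handleRecord(record); // hypothetical: your processing logic
        }
        if (!records.isEmpty()) {
            // Commit only after the whole batch has been processed
            consumer.commitSync();
        }
    }
} finally {
    consumer.close();
}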

Kafka Producer Threads

Kafka synchronous producer

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

public class KafkaProducerThread extends Thread {
    KafkaProducer<Integer, String> producer;
    String topic;

    public KafkaProducerThread(String topic) {
        // Build the connection configuration
        Properties properties = new Properties();
        // For multiple servers, separate the addresses with commas
        // Note: the broker port must be open; on a cloud server, also configure the
        // internal and external IPs in server.properties
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, "my-producer");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // A client is constructed either with new or via a factory
        producer = new KafkaProducer<>(properties);
        this.topic = topic;
    }

    public void run() {
        int num = 0;
        try {
            while (num < 20) {
                // Build the message inside the loop so each send carries the current num
                String msg = "kafka practice msg: " + num;
                // Synchronous send: Future.get() blocks until the broker acknowledges
                RecordMetadata recordMetadata = producer.send(new ProducerRecord<>(topic, msg)).get();
                // Runs only once the result above is available
                System.out.println(recordMetadata.offset() + "->" + recordMetadata.partition() +
                        "->" + recordMetadata.topic());
                TimeUnit.SECONDS.sleep(2);
                num++;
            }
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        } finally {
            producer.close();
        }
    }

    public static void main(String[] args) {
        new KafkaProducerThread("test").start();
    }
}

Kafka asynchronous producer

The skeleton matches the synchronous producer, but send() is given a callback instead of blocking on Future.get(), so the loop never waits for the broker:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.TimeUnit;

public class KafkaAsyncProducerThread extends Thread {
    KafkaProducer<Integer, String> producer;
    String topic;

    public KafkaAsyncProducerThread(String topic) {
        // Connection configuration, identical to the synchronous version
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, "my-producer");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        producer = new KafkaProducer<>(properties);
        this.topic = topic;
    }

    public void run() {
        int num = 0;
        try {
            while (num < 20) {
                String msg = "kafka practice msg: " + num;
                // Asynchronous send: the callback runs on the producer's I/O thread
                // once the broker responds, so the loop does not block on the result
                producer.send(new ProducerRecord<>(topic, msg), (metadata, exception) -> {
                    if (exception != null) {
                        exception.printStackTrace();
                    } else {
                        System.out.println(metadata.offset() + "->" + metadata.partition() +
                                "->" + metadata.topic());
                    }
                });
                TimeUnit.SECONDS.sleep(2);
                num++;
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        } finally {
            // close() flushes buffered records and waits for in-flight sends
            producer.close();
        }
    }

    public static void main(String[] args) {
        new KafkaAsyncProducerThread("test").start();
    }
}

Kafka Consumer Thread

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class KafkaConsumerThread extends Thread {
    KafkaConsumer<Integer, String> consumer;
    String topic;

    public KafkaConsumerThread(String topic) {
        // Build the connection configuration, this time with ConsumerConfig
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "39.105.136.112:9092");
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "my-consumer");
        // Deserialization: consumers use Deserializer classes
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                IntegerDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());

        // The following settings have no producer equivalent
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "my-gid"); // consumer group to join
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000"); // session timeout / heartbeat
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); // auto-commit interval (batched)
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // where a new group starts consuming

        consumer = new KafkaConsumer<>(properties);
        this.topic = topic;
    }

    public void run() {
        // Subscribe to the topic
        // Note: Collections.singleton returns an immutable single-element Set;
        // singletonList and singletonMap work the same way
        consumer.subscribe(Collections.singleton(this.topic));
        while (true) {
            // Receive messages with poll()
            ConsumerRecords<Integer, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            // Note: a one-line lambda body needs no braces
            consumerRecords.forEach(record -> System.out.println(record.key() + "->" +
                    record.value() + "->" + record.offset()));
        }
    }

    public static void main(String[] args) {
        new KafkaConsumerThread("test").start();
    }
}
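
The poll loop above never exits, so the consumer is never closed. The standard shutdown idiom is KafkaConsumer.wakeup(), which makes a blocked poll() throw WakeupException; a sketch of how run() could be adapted (the shutdown-hook wiring is illustrative):

// import org.apache.kafka.common.errors.WakeupException;
public void run() {
    consumer.subscribe(Collections.singleton(this.topic));
    try {
        while (true) {
            ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(record -> System.out.println(record.key() + "->" +
                    record.value() + "->" + record.offset()));
        }
    } catch (WakeupException e) {
        // Expected during shutdown: another thread called consumer.wakeup()
    } finally {
        consumer.close();
    }
}

// From another thread, e.g. a JVM shutdown hook:
// Runtime.getRuntime().addShutdownHook(new Thread(() -> consumer.wakeup()));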

Kafka Session Management System

KafkaClientService.java

package codeTemplate.kafkaSessionManager.service;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.jvnet.hk2.annotations.Service;

import java.util.Properties;

@Service
public class KafkaClientService {

    public <K, V> KafkaConsumer<K, V> createKafkaConsumer(String groupId, Properties kafkaConsumerProperties) {
        kafkaConsumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return new KafkaConsumer<>(kafkaConsumerProperties);
    }
}
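
createKafkaConsumer only fills in group.id, so the caller must supply the rest of the base configuration; a hedged usage sketch (the broker address is a placeholder):

Properties base = new Properties();
base.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
base.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        org.apache.kafka.common.serialization.StringDeserializer.class.getName());
base.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        org.apache.kafka.common.serialization.StringDeserializer.class.getName());
KafkaConsumer<String, String> consumer =
        new KafkaClientService().createKafkaConsumer("my-group", base);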

PollAndStoreMessageForATask.java

package codeTemplate.kafkaSessionManager.consumer;

import codeTemplate.kafkaSessionManager.service.KafkaClientService;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import net.jodah.failsafe.Failsafe;
import net.jodah.failsafe.RetryPolicy;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

@Slf4j
public class PollAndStoreMessageForATask implements Runnable {

    private final RetryPolicy<Object> retryPolicy = new RetryPolicy<>().handle(Exception.class)
            .withDelay(Duration.ofSeconds(10))
            .withMaxRetries(10)
            .onFailedAttempt(e -> log.error("Failed to create kafka consumer", e.getLastFailure()))
            .onRetry(e -> log.warn("Fail {} times, try again", e.getAttemptCount()));
    private final ConcurrentHashMap<String, List<Map<String, Object>>> session2MessageMap;
    private final KafkaClientService kafkaClientService;

    public PollAndStoreMessageForATask(ConcurrentHashMap<String, List<Map<String, Object>>> session2MessageMap, KafkaClientService kafkaClientService) {
        this.session2MessageMap = session2MessageMap;
        this.kafkaClientService = kafkaClientService;
    }

    @Override
    public void run() {
        KafkaConsumer<String, String> consumer = initConsumer(kafkaClientService);
        ObjectMapper objectMapper = new ObjectMapper();
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(30));
                if (records == null || records.isEmpty()) {
                    TimeUnit.MILLISECONDS.sleep(100);
                    continue;
                }
                for (ConsumerRecord<String, String> record : records) {
                    Map<String, Object> kafkaMessageMap = objectMapper.readValue(record.value(), Map.class);
                    String session = (String) kafkaMessageMap.get("session");
                    // computeIfAbsent is atomic on ConcurrentHashMap, avoiding the
                    // putIfAbsent-then-get race
                    session2MessageMap.computeIfAbsent(session, k -> new ArrayList<>()).add(kafkaMessageMap);
                }
            }
        } catch (InterruptedException | JsonProcessingException e) {
            log.error("consuming kafka has a exception", e);
        }

    }

    private KafkaConsumer<String, String> initConsumer(KafkaClientService kafkaClientService) {
        Properties kafkaConsumerProperties = new Properties();
        // bootstrap.servers and the deserializers are required before the consumer
        // can be constructed; the broker address here is a placeholder
        kafkaConsumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        kafkaConsumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        kafkaConsumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        kafkaConsumerProperties.put(ConsumerConfig.CLIENT_ID_CONFIG, "codeTemplate-consumer-for-A" + System.currentTimeMillis());
        kafkaConsumerProperties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1); // at most one record per poll
        kafkaConsumerProperties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 6 * 60 * 1000); // allow 6 min between polls
        KafkaConsumer<String, String> consumer = Failsafe.with(retryPolicy)
                .onFailure(e -> log.error("Failed to create kafka consumer after all retries", e.getFailure()))
                .onSuccess(e -> log.info("Succeed to create kafka consumer, {}", e.getResult()))
                .get(() -> kafkaClientService.createKafkaConsumer("codeTemplate-" + System.currentTimeMillis(), kafkaConsumerProperties));
        consumer.subscribe(Collections.singletonList("A-async-result"));
        return consumer;
    }
}

KafkaConsumerManagerForA.java

package codeTemplate.kafkaSessionManager.consumer;

import codeTemplate.kafkaSessionManager.service.KafkaClientService;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.jvnet.hk2.annotations.Service;

import java.util.List;
import java.util.Map;
import java.util.concurrent.*;

@Slf4j
@Service
public class KafkaConsumerManagerForA {

    private final KafkaClientService kafkaClientService;
    @Getter
    private final ConcurrentHashMap<String, List<Map<String, Object>>> session2MessageMap = new ConcurrentHashMap<>();
    private static final ThreadPoolExecutor executorService = new ThreadPoolExecutor(1, 1, 30, TimeUnit.SECONDS,
            new ArrayBlockingQueue<>(5), new ThreadFactoryBuilder().setNameFormat("basic-thread-%d").build(), new ThreadPoolExecutor.CallerRunsPolicy());

    /**
     * Singleton via a static inner holder class.
     * Note: field injection cannot have run before this private constructor executes,
     * so the service is instantiated directly instead of relying on @Inject.
     **/
    private KafkaConsumerManagerForA() {
        this.kafkaClientService = new KafkaClientService();
        executorService.execute(new PollAndStoreMessageForATask(session2MessageMap, kafkaClientService));
    }

    private static class SingletonHolder {
        static KafkaConsumerManagerForA singleton = new KafkaConsumerManagerForA();
    }

    public static KafkaConsumerManagerForA getInstance() {
        return SingletonHolder.singleton;
    }
}

Running the demo

package codeTemplate.kafkaSessionManager;

import codeTemplate.kafkaSessionManager.consumer.KafkaConsumerManagerForA;

import java.util.List;
import java.util.Map;

public class KafkaDemo {

    public static void main(String[] args) {
        String sessionId = "taskId";
        getTaskMessage(sessionId);
    }

    private static String getTaskMessage(String sessionId) {
        KafkaConsumerManagerForA instance = KafkaConsumerManagerForA.getInstance();
        Map<String, List<Map<String, Object>>> session2MessageMap = instance.getSession2MessageMap();
        List<Map<String, Object>> messageMapList = session2MessageMap.remove(sessionId);
        if (messageMapList == null) { // nothing has arrived for this session yet
            return "";
        }
        for (Map<String, Object> messageMap : messageMapList) {
            Map<String, Object> ackmml = (Map<String, Object>) messageMap.get("ackmml");
            if (ackmml.get("errmsg") != null) {
                throw new RuntimeException("failure");
            }
            if (ackmml.get("result") != null) {
                List<Map<String, Object>> result = (List<Map<String, Object>>) ackmml.get("result");
                Map<String, Object> info = result.get(0);
                String message = (String) info.get("message");
                if (message != null) {
                    return message;
                }
            }
        }
        return "";
    }
}
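
getTaskMessage does a single, immediate lookup, but the consumer thread fills session2MessageMap asynchronously, so a one-shot call will usually come back empty. A hedged sketch of a polling wrapper that could be added to KafkaDemo (waitForTaskMessage, the timeout, and the retry interval are all illustrative; needs java.time.Duration and java.util.concurrent.TimeUnit):

private static String waitForTaskMessage(String sessionId, Duration timeout) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeout.toMillis();
    while (System.currentTimeMillis() < deadline) {
        String message = getTaskMessage(sessionId);
        if (!message.isEmpty()) {
            return message;
        }
        TimeUnit.MILLISECONDS.sleep(500); // retry interval, arbitrary
    }
    return ""; // timed out without receiving a message
}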
