Kafka Example

First, add the required dependencies:

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.10.0.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.zookeeper</groupId>
            <artifactId>zookeeper</artifactId>
            <version>3.4.6</version>
        </dependency>
        <dependency>
            <groupId>com.101tec</groupId>
            <artifactId>zkclient</artifactId>
            <version>0.9</version>
        </dependency>

Producer


import java.util.Map;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;

/**
 * sunjiamin@sunjiamin
 *
 * @author sunjiamin
 * @version 0.0.1
 * @desc Lazy initialization of KafkaProducer via double-checked locking
 * @date 2017-09-07 09:02:34
 */
public class LazySingletonProducer {

    private static volatile Producer<byte[], String> producer;

    /**
     * Private constructor to prevent external instantiation
     */
    private LazySingletonProducer() {

    }

    /**
     * Get the singleton instance, creating it on first use
     * @param config producer configuration
     * @return the shared Producer instance
     */
    public static Producer<byte[], String> getInstance(Map<String, Object> config) {
        if (producer == null) {
            synchronized(LazySingletonProducer.class) {
                if (producer == null) {
                    producer = new KafkaProducer<byte[], String>(config);
                }
            }
        }
        return producer;
    }

    /**
     * Whether the producer has been instantiated
     * @return true if the instance already exists
     */
    public static boolean isInstanced() {
        return producer != null;
    }
}

The client sends messages through the producer:

    /**
     * Send a record to Kafka
     * @param value the message payload
     */
    private void send(String value) {
        // 4-byte key derived from "app + host", so with the default partitioner
        // all records from the same source go to the same partition
        final byte[] key = ByteBuffer.allocate(4).putInt(new StringBuilder(app).append(host).toString().hashCode()).array();

        final ProducerRecord<byte[], String> record = new ProducerRecord<byte[], String>(this.topic, key, value);
        LazySingletonProducer.getInstance(this.config).send(record, new Callback() {
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                // TODO: how to handle send failures (e.g. stop the appender directly)

            }
        });
    }
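The config map handed to getInstance is not shown in the post. A minimal sketch of what it might contain, assuming byte-array keys and String values to match the producer's generic types (the broker address is a placeholder):

    Map<String, Object> config = new HashMap<String, Object>();
    // Placeholder broker list; replace with your cluster
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // Serializers must match Producer<byte[], String>
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // "1" = wait only for the partition leader's acknowledgement
    config.put(ProducerConfig.ACKS_CONFIG, "1");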

Consumer:

Build the consumer bean:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
@EnableConfigurationProperties({KafkaProperties.class})
public class KafkaConfiguration {

    @Autowired
    private KafkaProperties kafkaProperties;

    // kafka consumer
    @Bean
    public KafkaConsumer<byte[], String> kafkaConsumer() {
        Map<String, Object> config = new HashMap<String, Object>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaProperties.getBrokers());
        config.put(ConsumerConfig.GROUP_ID_CONFIG, this.kafkaProperties.getConsumeGroup());
        // Commit offsets to Kafka manually (this client version does not store offsets in ZooKeeper)
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 15000);
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        KafkaConsumer<byte[], String> kafkaConsumer = new KafkaConsumer<byte[], String>(config);

        return kafkaConsumer;
    }

}
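Note that the bean only constructs the consumer; it never subscribes to a topic. Before the poll loop below can return any records, the consuming code has to subscribe somewhere, presumably along these lines (not shown in the post; the topic comes from KafkaProperties):

    this.kafkaConsumer.subscribe(Collections.singletonList(this.kafkaProperties.getTopic()));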

The properties class it references:

import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties(prefix = "spring.message.kafka")
public class KafkaProperties {

    private String brokers;

    private String consumeGroup;

    private long pollTimeout;

    private String topic;

    public String getBrokers() {
        return brokers;
    }

    public void setBrokers(String brokers) {
        this.brokers = brokers;
    }

    public String getConsumeGroup() {
        return consumeGroup;
    }

    public void setConsumeGroup(String consumeGroup) {
        this.consumeGroup = consumeGroup;
    }

    public long getPollTimeout() {
        return pollTimeout;
    }

    public void setPollTimeout(long pollTimeout) {
        this.pollTimeout = pollTimeout;
    }

    public String getTopic() {
        return topic;
    }

    public void setTopic(String topic) {
        this.topic = topic;
    }
}
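Since the binding prefix is spring.message.kafka, the matching entries in application.properties would look roughly like this (all values are placeholders; Spring's relaxed binding maps consume-group to consumeGroup and poll-timeout to pollTimeout):

    spring.message.kafka.brokers=localhost:9092
    spring.message.kafka.consume-group=log-consume-group
    spring.message.kafka.poll-timeout=1000
    spring.message.kafka.topic=app-log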

The consumer client's poll loop:

    @Autowired
    private KafkaConsumer<byte[], String> kafkaConsumer;
    @Autowired
    private KafkaProperties kafkaProperties;
    @Autowired
    private FileUtil fileUtil;

    public static Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<TopicPartition, OffsetAndMetadata>();
    private Thread thread;

    @Override
    public void doTask() {
        this.thread = Thread.currentThread();
        int count = 0;
        try {
            while (true) {
                ConsumerRecords<byte[], String> records = this.kafkaConsumer.poll(this.kafkaProperties.getPollTimeout());
                if (!records.isEmpty()) {
                    Map<String, List<String>> lines = new HashMap<String, List<String>>();
                    for (ConsumerRecord<byte[], String> record : records) {
                        String value = record.value();
                        LogDto logDto = this.getLogDto(value);
                        if (logDto != null) {
                            if (LogLevel.INFO.isLegal(logDto.getLevel())) {
                                // Only logs at INFO, ERROR, or WARN level are processed
                                String key = logDto.getDay();
                                if (lines.containsKey(key)) {
                                    // This day already has an entry; append to it
                                    lines.get(key).add(value);
                                } else {
                                    // First record for this day
                                    List<String> tmpLines = new ArrayList<String>();
                                    tmpLines.add(value);
                                    lines.put(key, tmpLines);
                                }
                            }
                        } else {
                            LOGGER.info("record transform error, {}", value);
                        }
                        currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1));

                        count++;
                        if (count >= 1000) {
                        // Once 1000 records have been processed, commit the offsets to Kafka
                            this.kafkaConsumer.commitAsync(currentOffsets, new KafkaOffsetCommitCallback());
                            count = 0;
                        }
                    }
                    // save to file
                    int size = this.fileUtil.save(lines);

                    this.kafkaConsumer.commitAsync(currentOffsets, new KafkaOffsetCommitCallback());
                    LOGGER.info("total record: {}, saved {} records to file", records.count(), size);
                }
            }
        } catch (WakeupException e) {
            // expected path during shutdown: another thread called kafkaConsumer.wakeup()
            LOGGER.error("wakeup received, shutting down", e);
        } catch (Exception e) {
            LOGGER.error("process records error", e);
        } finally {
            this.kafkaConsumer.commitSync(currentOffsets);
            LOGGER.info("finally commit the offset");
            // No need to call kafkaConsumer.close() here; the Spring container closes the bean on shutdown
        }
    }
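The KafkaOffsetCommitCallback passed to commitAsync is not shown in the post. A minimal sketch that simply logs failed commits (the class name is from the post, the body is assumed):

    import java.util.Map;

    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.clients.consumer.OffsetCommitCallback;
    import org.apache.kafka.common.TopicPartition;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class KafkaOffsetCommitCallback implements OffsetCommitCallback {

        private static final Logger LOGGER = LoggerFactory.getLogger(KafkaOffsetCommitCallback.class);

        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            if (exception != null) {
                // No retry here: the next async commit, or the final commitSync in the
                // poll loop's finally block, will commit a newer (or equal) offset anyway
                LOGGER.error("commit offset failed, offsets: {}", offsets, exception);
            }
        }
    }

The WakeupException branch above also implies that shutdown is triggered from another thread calling kafkaConsumer.wakeup(), which makes the blocking poll() throw and lets the finally block commit the offsets synchronously before exit.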