Kafka学习(4)——生产消费实践

1. 基于0.8风格(旧版Scala)API的生产者和消费者(依赖的jar版本为0.9.0.1,但使用的是0.8的旧API)

maven依赖:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.9.0.1</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.9.0.1</version>
</dependency>

1.1 消息生产

package com.best.kafka.test;

import com.best.xingng.cxf.databinding.json.util.JsonUtil;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.UUID;


/**
 * Demonstrates batch-sending messages with the 0.8-style (old Scala) Kafka producer API.
 *
 * <p>Sends 100 keyed messages to {@code TOPIC}, with a random UUID as the key and a
 * serialized String as the value.
 */
public class LogProcessProducer {

    // NOTE(review): the producer does not actually need ZooKeeper — only the old
    // high-level consumer does. Kept here because the original config set it;
    // the old ProducerConfig merely warns about unused properties.
    private static final String KAFKA_ZOOKEEPER_CONNECT = "127.0.0.1:2181";
    private static final String KAFKA_METADATA_BROKER_LIST = "127.0.0.1:9092";
    private static final String KAFKA_ZK_CONNECTION_TIMEOUT_MS = "1000000";
    private static final String TOPIC = "test1";

    /**
     * Builds a producer, wraps 100 test messages as {@link KeyedMessage}s and sends
     * them in one batch call. Side effect only (network I/O); returns nothing.
     */
    public void test() {
        // Initialize the producer.
        Properties producerProps = new Properties();
        producerProps.put("zookeeper.connect", KAFKA_ZOOKEEPER_CONNECT);
        producerProps.put("zk.connectiontimeout.ms", KAFKA_ZK_CONNECTION_TIMEOUT_MS);
        producerProps.put("metadata.broker.list", KAFKA_METADATA_BROKER_LIST);
        // BUG FIX: the original set the new-client keys "key.serializer"/"value.serializer"
        // to *Deserializer* classes. Besides being deserializers (wrong direction), those
        // property names belong to the new 0.9 client and are ignored by the old Scala
        // producer used below. The 0.8-style producer is configured via "serializer.class";
        // DefaultEncoder passes byte[] through unchanged, which matches the byte[]/byte[]
        // generic types of the producer.
        producerProps.put("serializer.class", "kafka.serializer.DefaultEncoder");
        producerProps.put("key.serializer.class", "kafka.serializer.DefaultEncoder");
        kafka.javaapi.producer.Producer<byte[], byte[]> producer =
                new kafka.javaapi.producer.Producer<byte[], byte[]>(new ProducerConfig(producerProps));

        // Wrap the payloads: random UUID bytes as key, serialized String as value.
        List<KeyedMessage<byte[], byte[]>> keyedMessages = new ArrayList<KeyedMessage<byte[], byte[]>>();
        for (int i = 1; i <= 100; i++) {
            String s = "test"; // payload is a String here
            KeyedMessage<byte[], byte[]> keyedMessage = new KeyedMessage<byte[], byte[]>(
                    TOPIC,
                    UUID.randomUUID().toString().getBytes(),
                    SerializerUtil.serializer(s)); // project helper: Object -> byte[]
            keyedMessages.add(keyedMessage);
        }

        // Batch send in a single call.
        producer.send(keyedMessages);
    }
}

SerializerUtil是一个自定义的序列化帮助类,实现对象的序列化和反序列化。

1.2 消息消费

一方面,kafka认为0.9.*版本的consumer还不够完善,所以推荐使用0.9.*之前的consumer。

另一方面,0.9.*版本不再在zookeeper上维护consumer的offset,而是维护在broker上。

这对旧的kafka-manager来说影响较大,当前最新的kafka-manager-1.3.0.8已经支持broker上维护的offset,我们还未升级。

package com.best.kafka.test;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.message.MessageAndMetadata;

import java.util.*;


public class LogProcessConsumer {

    private static String KAFKA_ZOOKEEPER_CONNECT = "127.0.0.1:2181";
    private static String KAFKA_METADATA_BROKER_LIST = "127.0.0.1:9092";
    private static String KAFKA_FETCH_MAX_SIZE = (5*1024*1024) + "";
    private static String TOPIC = "test1";
    private static int PARTITION_NUM = 1;

    public void test() {
        //初始化consumer
        Properties consumerProps = new Properties();
        consumerProps.put("zookeeper.connect", KAFKA_ZOOKEEPER_CONNECT);
        consumerProps.put("metadata.broker.list", KAFKA_METADATA_BROKER_LIST);
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        consumerProps.put("auto.commit.enable", "false");
        consumerProps.put("auto.offset.reset", "smallest");
        consumerProps.put("fetch.message.max.bytes", KAFKA_FETCH_MAX_SIZE);
        consumerProps.put("group.id", "test");
        kafka.javaapi.consumer.ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));

        //获取KafkaStream
        int num = 10; // partition数目
        HashMap<String, Integer> map = new H
  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值