Reading From and Writing to Kafka


package test2;

/**
 * Created by lwc on 4/18/16.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;


import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;


/**
 * @author <a href="mailto:leicui001@126.com">崔磊</a>
 * @date November 4, 2015 11:44:15 AM
 */
public class MyProducer {


    public static void main(String[] args) throws InterruptedException {


        Properties props = new Properties();
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("metadata.broker.list", "localhost:9092");
        props.put("partitioner.class", "test2.MyPartitioner");
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);


        // Send messages one at a time
        for (int i = 0; i <= 20; i++) {
            KeyedMessage<String, String> message =
                    new KeyedMessage<String, String>("test", i + "", "Message" + i);
            producer.send(message);
            System.out.println("Sent message " + message);
            Thread.sleep(500);
        }


        // Send messages in batches
        List<KeyedMessage<String, String>> messages = new ArrayList<KeyedMessage<String, String>>(100);
        for (int i = 0; i <= 20; i++) {
            KeyedMessage<String, String> message =
                    new KeyedMessage<String, String>("test", i + "", "Message" + i);
            messages.add(message);
            System.out.println(message);
            if (i % 10 == 0) {
                producer.send(messages);
                messages.clear();
            }
        }
        // Send any messages left over from the last incomplete batch
        if (!messages.isEmpty()) {
            producer.send(messages);
        }
        producer.close();
    }
}
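
The producer config above points "partitioner.class" at test2.MyPartitioner, which is not shown in this post. With the old 0.8 producer API a custom partitioner implements kafka.producer.Partitioner and must provide a constructor taking VerifiableProperties. The class below is only a minimal sketch of what that partitioner might look like; the hash-modulo routing is an assumption, not the original implementation.

package test2;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class MyPartitioner implements Partitioner {

    // The old producer instantiates the partitioner reflectively and passes the config in,
    // so this constructor signature is required even if the properties are unused
    public MyPartitioner(VerifiableProperties props) {
    }

    @Override
    public int partition(Object key, int numPartitions) {
        // Mask the sign bit so the result is always a valid, non-negative partition index;
        // the keys sent above are the numeric strings "0".."20"
        return (key.hashCode() & 0x7fffffff) % numPartitions;
    }
}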

package test2;

/**
 * Created by lwc on 4/18/16.
 */
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;


/**
 * Offsets are recorded in ZooKeeper, keyed by group.id; the mapping between partitions and consumers is maintained by Kafka.
 *
 * @author <a href="mailto:leicui001@126.com">崔磊</a>
 * @date November 4, 2015 11:44:15 AM
 */
public class MyHighLevelConsumer {

    /**
     * The group ID this consumer belongs to
     */
    private String groupid;

    /**
     * The ID of this consumer
     */
    private String consumerid;

    /**
     * Number of threads to start per topic
     */
    private int threadPerTopic;

    public MyHighLevelConsumer(String groupid, String consumerid, int threadPerTopic) {
        super();
        this.groupid = groupid;
        this.consumerid = consumerid;
        this.threadPerTopic = threadPerTopic;
    }

    public void consume() {
        Properties props = new Properties();
        props.put("group.id", groupid);
        props.put("consumer.id", consumerid);
        props.put("zookeeper.connect", "localhost:2181");
        props.put("zookeeper.session.timeout.ms", "60000");
        props.put("zookeeper.sync.time.ms", "2000");
        // props.put("auto.commit.interval.ms", "1000");

        ConsumerConfig config = new ConsumerConfig(props);
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();

        // Set how many threads to start for each topic
        topicCountMap.put("test", threadPerTopic);

        // Get the message streams
        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);

        // Start one thread per stream to consume messages
        for (KafkaStream<byte[], byte[]> stream : streams.get("test")) {
            new MyStreamThread(stream).start();
        }
    }

    /**
     * Worker thread inside each consumer
     *
     * @author cuilei05
     *
     */
    private class MyStreamThread extends Thread {
        private KafkaStream<byte[], byte[]> stream;

        public MyStreamThread(KafkaStream<byte[], byte[]> stream) {
            super();
            this.stream = stream;
        }

        @Override
        public void run() {
            ConsumerIterator<byte[], byte[]> streamIterator = stream.iterator();

            // Process messages one by one
            while (streamIterator.hasNext()) {
                MessageAndMetadata<byte[], byte[]> message = streamIterator.next();
                String topic = message.topic();
                int partition = message.partition();
                long offset = message.offset();
                String key = new String(message.key());
                String msg = new String(message.message());
                // Process the message here; this example simply prints it
                // If processing fails, the information above can be written to a log, or sent as an alert SMS or email, for later handling
                System.out.println("consumerid:" + consumerid + ", thread : " + Thread.currentThread().getName()
                        + ", topic : " + topic + ", partition : " + partition + ", offset : " + offset + " , key : "
                        + key + " , mess : " + msg);
            }
        }
    }

    public static void main(String[] args) {
        String groupid = "myconsumergroup";
        MyHighLevelConsumer consumer1 = new MyHighLevelConsumer(groupid, "myconsumer1", 1);
        MyHighLevelConsumer consumer2 = new MyHighLevelConsumer(groupid, "myconsumer2", 1);

        consumer1.consume();
        consumer2.consume();
    }
}
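
Note that the example never shuts the connector down, so each consumer keeps its ZooKeeper session open until the JVM exits. If a clean stop is needed, the usual pattern with this API is to keep the ConsumerConnector as a field instead of a local variable and expose a shutdown method. A minimal sketch (the connector field and shutdown() method are additions, not part of the original class):

    // Hypothetical additions to MyHighLevelConsumer: assign the connector in consume() instead of using a local variable
    private ConsumerConnector connector;

    public void shutdown() {
        if (connector != null) {
            // Stops the fetcher threads and closes the ZooKeeper session;
            // with auto-commit enabled the latest offsets are committed first
            connector.shutdown();
        }
    }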

package test2;

/**
 * Created by lwc on 4/18/16.
 */
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;


import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;


/**
 * Offsets are maintained by the consumer itself; the target topic and partition are also chosen by the consumer.
 *
 * @author <a href="mailto:leicui001@126.com">崔磊</a>
 * @date November 4, 2015 11:44:15 AM
 *
 */
public class MySimpleConsumer {


    public static void main(String[] args) {
        new MySimpleConsumer().consume();
    }
    /**
     * Consume messages
     */
    public void consume() {
        int partition = 0;
        // Find the leader broker
        Broker leaderBroker = findLeader("localhost:9092", "test", partition);
        // Consume from the leader
        SimpleConsumer simpleConsumer =
                new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), 1000, 1000, "mySimpleConsumer");
        long startOffset = 1;
        int fetchSize = 10;


        while (true) {
            // Track the offset of the last consumed message; start one behind so an empty fetch does not advance the position
            long offset = startOffset - 1;
            // Add a fetch for the target topic, partition, start offset and fetchSize (in bytes); multiple fetches can be added
            FetchRequest req =
                    new FetchRequestBuilder().addFetch("test", partition, startOffset, fetchSize).build();


            // Fetch messages
            FetchResponse fetchResponse = simpleConsumer.fetch(req);

            ByteBufferMessageSet messageSet = fetchResponse.messageSet("test", partition);
            for (MessageAndOffset messageAndOffset : messageSet) {
                Message mess = messageAndOffset.message();
                ByteBuffer payload = mess.payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                String msg = new String(bytes);

                offset = messageAndOffset.offset();
                System.out.println("partition : " + 3 + ", offset : " + offset + "  mess : " + msg);
            }
            // Continue with the next batch
            startOffset = offset + 1;
        }
    }


    /**
     * Find the leader broker of the specified partition
     *
     * @param brokerHosts broker addresses, in the form "host1:port1,host2:port2,host3:port3"
     * @param topic topic
     * @param partition partition
     * @return the leader broker
     */
    public Broker findLeader(String brokerHosts, String topic, int partition) {
        Broker leader = findPartitionMetadata(brokerHosts, topic, partition).leader();
        System.out.println(String.format("Leader tor topic %s, partition %d is %s:%d", topic, partition, leader.host(),
                leader.port()));
        return leader;
    }


    /**
     * Find the metadata of the specified partition
     *
     * @param brokerHosts broker addresses, in the form "host1:port1,host2:port2,host3:port3"
     * @param topic topic
     * @param partition partition
     * @return partition metadata
     */
    private PartitionMetadata findPartitionMetadata(String brokerHosts, String topic, int partition) {
        PartitionMetadata returnMetaData = null;
        for (String brokerHost : brokerHosts.split(",")) {
            SimpleConsumer consumer = null;
            String[] splits = brokerHost.split(":");
            consumer = new SimpleConsumer(splits[0], Integer.valueOf(splits[1]), 100000, 64 * 1024, "leaderLookup");
            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest request = new TopicMetadataRequest(topics);
            TopicMetadataResponse response = consumer.send(request);
            List<TopicMetadata> topicMetadatas = response.topicsMetadata();
            for (TopicMetadata topicMetadata : topicMetadatas) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partition) {
                        returnMetaData = partitionMetadata;
                    }
                }
            }
            if (consumer != null)
                consumer.close();
        }
        return returnMetaData;
    }


    /**
     * Find the offset for a given client based on a timestamp
     *
     * @param consumer SimpleConsumer
     * @param topic topic
     * @param partition partition
     * @param clientID client ID
     * @param whichTime timestamp, e.g. kafka.api.OffsetRequest.EarliestTime() or LatestTime()
     * @return offset
     */
    public long getLastOffset(SimpleConsumer consumer, String topic, int partition, String clientID, long whichTime) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientID);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }
}
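
getLastOffset is defined above but never called from consume(), which hard-codes startOffset = 1 and may fail if offset 1 is no longer available in the log. A typical use is to ask the broker for the earliest (or latest) available offset before entering the fetch loop; a sketch, assuming consumption should start at the beginning of the partition:

        // Inside consume(), instead of the hard-coded startOffset = 1:
        long startOffset = getLastOffset(simpleConsumer, "test", partition,
                "mySimpleConsumer", kafka.api.OffsetRequest.EarliestTime());
        // Use kafka.api.OffsetRequest.LatestTime() instead to start from the newest messages only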


