A First Look at Kafka: A Small Demo

Part 1: Producer

import java.util.Properties;  

import kafka.javaapi.producer.Producer;  
import kafka.producer.KeyedMessage;  
import kafka.producer.ProducerConfig;  
    
public class ProducerTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        // ZooKeeper connection address
        props.put("zk.connect", "localhost:2181");
        // Serializer class; the default is kafka.serializer.DefaultEncoder
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // List of Kafka brokers used to fetch metadata; it does not need to include every broker
        props.put("metadata.broker.list", "localhost:9092");
        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);
        System.out.println("Start sending messages --------------------------");
        for (int i = 0; i < 100; i++) {
            // topic:   "topic"
            // message: "hehe" + i
            producer.send(new KeyedMessage<String, String>("topic", "hehe" + i));
        }
        System.out.println("Finished sending messages --------------------------");
        producer.close();
    }  
} 
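The producer above uses the legacy kafka.javaapi / kafka.producer client that shipped with Kafka 0.8. For newer client versions, roughly the same demo can be written against the org.apache.kafka.clients.producer API. Below is a minimal sketch, not part of the original demo, assuming a recent kafka-clients jar, a broker at localhost:9092, and the same topic name "topic"; the class name NewApiProducerTest is just a placeholder.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class NewApiProducerTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list used for the initial connection (placeholder address)
        props.put("bootstrap.servers", "localhost:9092");
        // Serializers for the record key and value
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        System.out.println("Start sending messages --------------------------");
        for (int i = 0; i < 100; i++) {
            // Same topic and payload as the legacy demo above
            producer.send(new ProducerRecord<String, String>("topic", "hehe" + i));
        }
        System.out.println("Finished sending messages --------------------------");
        producer.close();
    }
}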

Part 2: Consumer

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
 
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;
 
public class ConsumerTest {
 
    private final ConsumerConnector consumer;
 
    private ConsumerTest() {
        Properties props = new Properties();
        // ZooKeeper connection string
        props.put("zookeeper.connect", "ip1:2181,ip2:2181,ip3:2181");
        // Consumer group id
        props.put("group.id", "sf-group");
        // ZooKeeper session timeout, used to detect whether a consumer has died;
        // when a consumer dies, the remaining consumers wait up to this long before the
        // failure is detected and a rebalance is triggered
        props.put("zookeeper.session.timeout.ms", "4000");
        // How far a ZooKeeper follower may lag behind the ZooKeeper leader
        props.put("zookeeper.sync.time.ms", "200");
        // How often consumed offsets are committed to ZooKeeper (default 60 * 1000 ms).
        // Commits are time-based rather than per message, so if a commit fails and the
        // consumer restarts, it may receive messages it has already processed
        props.put("auto.commit.interval.ms", "1000");
        // What to do when ZooKeeper holds no offset, or the offset is out of range:
        // smallest = start from the earliest offset, largest = start from the latest offset,
        // any other value = throw an exception. The default is largest
        props.put("auto.offset.reset", "smallest");
        // Serializer class (a producer-side setting; the consumer ignores it and uses the
        // decoders passed to createMessageStreams below)
        props.put("serializer.class", "kafka.serializer.StringEncoder");
 
        ConsumerConfig config = new ConsumerConfig(props);
 
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }
 
    void consume() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("fanbingbingbitch", new Integer(1));
 
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
 
        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get("topic").get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()){
            System.out.println(it.next().message());
        }
    }
 
    public static void main(String[] args) {
        new ConsumerTest().consume();
    }
}
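The consumer above is the old ZooKeeper-based high-level consumer. As a rough equivalent for newer clients, here is a minimal sketch, not part of the original demo, using the org.apache.kafka.clients.consumer API; it replaces the ZooKeeper connection with a bootstrap.servers address and assumes kafka-clients 2.0 or later for poll(Duration). The broker address, group id, topic name, and class name NewApiConsumerTest are placeholders matching the demo above.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class NewApiConsumerTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list used for the initial connection (placeholder address)
        props.put("bootstrap.servers", "localhost:9092");
        // Consumer group id, same as the demo above
        props.put("group.id", "sf-group");
        // Start from the earliest offset when the group has no committed offset
        props.put("auto.offset.reset", "earliest");
        // Deserializers for the record key and value
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Collections.singletonList("topic"));
        try {
            while (true) {
                // poll blocks for up to one second waiting for new records
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
            }
        } finally {
            consumer.close();
        }
    }
}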

