Kafka Consumer and Producer Java API

1. Version information
jdk: 1.8
kafka: kafka_2.11-0.10.2.1

Java producer:

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
public class Producer_API {
     public static void main(String[] args) throws InterruptedException {
         Properties props = new Properties();
         //Broker address and port
         props.put("bootstrap.servers", "10.255.0.61:9092");
         //"acks" controls durability: "all" blocks on the full commit of the record (the slowest but most durable setting),
         //while "1" only waits for the partition leader to acknowledge the write.
         props.put("acks", "1");
         //If the request fails, the producer can automatically retry; with retries set to 0 it will not.
         props.put("retries", 0);
         //The producer maintains buffers of unsent records for each partition.
         props.put("batch.size", 16384);
         //Records are sent immediately by default; linger.ms adds a delay (in milliseconds) to allow batching.
         // props.put("linger.ms", 1);
         //Total memory the producer may use for buffering. When the buffer is exhausted, additional send calls block;
         //if they block for longer than max.block.ms a TimeoutException is thrown.
         // props.put("buffer.memory", 33554432);
         //The key.serializer and value.serializer instruct how to turn the key and value objects the user provides with their ProducerRecord into bytes.
         props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
         props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

         //Create the Kafka producer
         Producer<String, String> producer = new KafkaProducer<String, String>(props);
         //Main producer methods:
         // close(); //Close this producer.
         // close(long timeout, TimeUnit timeUnit); //This method waits up to timeout for the producer to complete the sending of all incomplete requests.
         for (int i = 0; i < 10000; i++) {
             System.out.println(i);
             System.out.println("begin");
             Thread.sleep(5000);
             //No partition is specified, so the default partitioner spreads the records across the topic's partitions by key hash
             producer.send(new ProducerRecord<String, String>("acctest", Integer.toString(i), Integer.toString(i)));
         }
        /*int i = 0;
        while(true) {
             System.out.println(i);
             producer.send(new ProducerRecord<String, String>("test",0, Integer.toString(6), Integer.toString(6)));
             i++;
        }*/
        System.out.println("end");
        producer.flush(); //Forces all buffered records to be sent immediately
        producer.close();
    }
}
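
The loop above uses a fire-and-forget send(). The producer API also accepts a Callback, which is invoked once the broker acknowledges the record (or the send finally fails). Below is a minimal sketch of an asynchronous send with a callback and a bounded close; it reuses the broker address and topic from the example above, and the class name Producer_Callback_API is only illustrative.

import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class Producer_Callback_API {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "10.255.0.61:9092"); //Same broker as the example above
        props.put("acks", "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<String, String>("acctest", Integer.toString(i), Integer.toString(i)),
                    new Callback() {
                        public void onCompletion(RecordMetadata metadata, Exception exception) {
                            if (exception != null) {
                                //The send failed even after any configured retries
                                exception.printStackTrace();
                            } else {
                                System.out.println("sent to partition " + metadata.partition()
                                        + " at offset " + metadata.offset());
                            }
                        }
                    });
        }
        //Wait up to 10 seconds for outstanding sends to finish, then release resources
        producer.close(10, TimeUnit.SECONDS);
    }
}
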
Java consumer:
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import net.sf.json.JSONObject;

public class Consumer_API extends Thread {
   //Consumer connector
   private final ConsumerConnector consumer;   
   //Topic to consume
   private final String topic;   

   public Consumer_API(String topic) {   
       consumer =kafka.consumer.Consumer   
               .createJavaConsumerConnector(createConsumerConfig());   
       this.topic =topic;   
   }   

    //Consumer configuration
    private static ConsumerConfig createConsumerConfig() {   
       Properties props = new Properties();   

       props.put("auto.offset.reset", "smallest");
       //props.put("zookeeper.connect","localhost:2181,10.XX.XX.XX:2181,10.XX.XX.XX:2181");
       //配置要连接的zookeeper地址与端口
       //The ‘zookeeper.connect’ string identifies where to find once instance of Zookeeper in your cluster.
       //Kafka uses ZooKeeper to store offsets of messages consumed for a specific topic and partition by this Consumer Group
       props.put("zookeeper.connect","10.255.0.167:2181");
       // props.put("metadata.broker.list", "127.0.0.1:9092");

       //配置zookeeper的组id (The ‘group.id’ string defines the Consumer Group this process is consuming on behalf of.)
       props.put("group.id", "me");

       //ZooKeeper session timeout.
       //The 'zookeeper.session.timeout.ms' is how many milliseconds Kafka will wait for
       //ZooKeeper to respond to a request (read or write) before giving up and continuing to consume messages.
       props.put("zookeeper.session.timeout.ms","10000"); 

       //The ‘zookeeper.sync.time.ms’ is the number of milliseconds a ZooKeeper ‘follower’ can be behind the master before an error occurs.
       props.put("zookeeper.sync.time.ms", "200");

       //The ‘auto.commit.interval.ms’ setting is how often updates to the consumed offsets are written to ZooKeeper. 
       //Note that since the commit frequency is time based instead of # of messages consumed, if an error occurs between updates to ZooKeeper on restart you will get replayed messages.
       props.put("auto.commit.interval.ms", "1000");
       props.put("rebalance.max.retries", "6");
       props.put("rebalance.backoff.ms", "1200");
       return new ConsumerConfig(props);   
    }   

    public void run(){ 

       Map<String, Integer> topicMap = new HashMap<String, Integer>();   
       topicMap.put(topic, 1);   
       Map<String, List<KafkaStream<byte[],byte[]>>> streamMap = consumer.createMessageStreams(topicMap);   
       System.out.println(streamMap);
       KafkaStream<byte[],byte[]> stream = streamMap.get(topic).get(0);   
       ConsumerIterator<byte[],byte[]> it = stream.iterator();   
       System.out.println("*********Results********");   
       while(true){   
           if(it.hasNext()){ 
               //Parse the received message as JSON and print it
               JSONObject jsStr = JSONObject.fromObject(new String(it.next().message())); 
               System.out.println(jsStr.toString());
           }
       }   
    }  

    public static void main(String[] args) {   
        Consumer_API consumerThread = new Consumer_API("halo");   //Kafka topic
        consumerThread.start();   
    }   
}
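
The consumer above uses the old ZooKeeper-based high-level consumer API (kafka.consumer / kafka.javaapi.consumer). Kafka 0.10.x also ships the new consumer, org.apache.kafka.clients.consumer.KafkaConsumer, which connects to the brokers directly and commits offsets to Kafka itself instead of ZooKeeper. The sketch below is a minimal equivalent using the new API; it reuses the broker address, group id, and topic from the examples above, and the class name NewConsumer_API is only illustrative.

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewConsumer_API {
    public static void main(String[] args) {
        Properties props = new Properties();
        //The new consumer talks to the brokers directly; no zookeeper.connect is needed
        props.put("bootstrap.servers", "10.255.0.61:9092");
        props.put("group.id", "me");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        //"earliest" is the new-consumer equivalent of the old "smallest"
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("halo"));
        try {
            while (true) {
                //poll() fetches records from the subscribed partitions, waiting up to 1000 ms
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("partition=" + record.partition() + ", offset=" + record.offset()
                            + ", key=" + record.key() + ", value=" + record.value());
                }
            }
        } finally {
            consumer.close();
        }
    }
}
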

Note: the Kafka client jars can be copied directly from the libs directory of the Kafka installation. Make sure the jar versions match the broker version, otherwise you will run into all kinds of strange problems.