Kafka Java示例

一、一些问题

1.1 kafka错误

错误:
  WARN Error while fetching metadata with correlation id 0 : {test-topic=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient)

解决方案:https://github.com/wurstmeister/kafka-docker/issues/85

config/server.properties:

>设置 advertised.host.name=<broker所在机器的IP>(客户端将使用该地址连接broker)。

1.2 kafka命令行
>创建topic:sh kafka-topics.sh --create --zookeeper 10.95.177.100:2181,10.95.97.175:2181,10.95.176.44:2181  --replication-factor 1 --partitions 1 --topic  topic-test
>查看topic:sh kafka-topics.sh --list --zookeeper 10.95.177.192:2181,10.95.97.175:2181,10.95.176.44:2181
>删除topic:
sh kafka-topics.sh --delete --topic topic-test --zookeeper 10.95.177.192:2181,10.95.97.175:2181,10.95.176.44:2181 
>生产消息:
sh kafka-console-producer.sh --broker-list 10.95.177.192:9092,10.95.97.175:9092 --topic topic-test
>消费消息:
sh kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic topic-test --from-beginning(新版消费者用 --bootstrap-server;旧版消费者改用 --zookeeper 10.95.177.192:2181,10.95.97.175:2181,10.95.176.44:2181,两者不能同时指定)
>启动kafka服务:
./kafka-server-start.sh -daemon ../config/server.properties
>停止kafka服务:
./kafka-server-stop.sh

1.3 Java API版本

一种:kafka.javaapi.*:

  1. import kafka.javaapi.producer.Producer;
  2. import kafka.producer.KeyedMessage;
  3. import kafka.producer.ProducerConfig;
另一种: org.apache.kafka.*:

import org.apache.kafka.clients.producer.KafkaProducer;(泛型参数 <K,V> 在声明变量时指定,import 语句中不能携带)
在Kafka 0.8.2之前,kafka.javaapi.producer.Producer 是唯一的官方 Java Client,底层用 Scala 实现。
在Kafka 0.8.2之后,有新的Java Producer API,org.apache.kafka.clients.producer.KafkaProducer,完全用Java实现的。

二、Java Kafka示例

pom.xml:

  1. <dependencies>
  2. <dependency>
  3. <groupId>org.apache.kafka </groupId>
  4. <artifactId>kafka-clients </artifactId>
  5. <version>0.10.2.0 </version>
  6. </dependency>
  7. <dependency>
  8. <groupId>org.apache.kafka </groupId>
  9. <artifactId>kafka_2.11 </artifactId>
  10. <version>0.10.2.0 </version>
  11. </dependency>
  12. </dependencies>
2.1 老版本Producer:

KafkaProducerOld.java:

  1. import kafka.javaapi.producer.Producer;
  2. import kafka.producer.KeyedMessage;
  3. import kafka.producer.ProducerConfig;
  4. import java.util.Properties;
  5. /**
  6. * kafka.javaapi.producer.Producer
  7. * */
  8. public class KafkaProducerOld {
  9. private Producer<String, String> producer;
  10. public final static String TOPIC = "didi-topic-test";
  11. private KafkaProducerOld() {
  12. Properties props = new Properties();
  13. props.put( "metadata.broker.list", "10.95.177.192:9092,10.95.97.175:9092");
  14. props.put( "serializer.class", "kafka.serializer.StringEncoder");
  15. ProducerConfig config = new ProducerConfig(props);
  16. producer = new Producer<String, String>(config);
  17. }
  18. public void produce() {
  19. int messageNo = 0;
  20. final int COUNT = 10;
  21. while(messageNo < COUNT) {
  22. String data = String.format( "hello kafka.javaapi.producer message %d", messageNo);
  23. KeyedMessage<String, String> msg = new KeyedMessage<String, String>(TOPIC, data);
  24. try {
  25. producer.send(msg);
  26. } catch (Exception e) {
  27. e.printStackTrace();
  28. }
  29. messageNo++;
  30. }
  31. producer.close();
  32. }
  33. public static void main(String[] args) {
  34. new KafkaProducerOld().produce();
  35. }
  36. }
2.2 老版本Consumer

KafkaConsumerOld.java:

  1. import kafka.consumer.ConsumerConfig;
  2. import kafka.consumer.ConsumerIterator;
  3. import kafka.consumer.KafkaStream;
  4. import kafka.javaapi.consumer.ConsumerConnector;
  5. import kafka.serializer.StringDecoder;
  6. import kafka.utils.VerifiableProperties;
  7. import java.util.HashMap;
  8. import java.util.List;
  9. import java.util.Map;
  10. import java.util.Properties;
  11. /**
  12. * Consumer: kafka.javaapi.consumer.Consumer
  13. * */
  14. public class KafkaConsumerOld {
  15. private final ConsumerConnector consumerConnector;
  16. public final static String TOPIC = "didi-topic-test";
  17. private KafkaConsumerOld() {
  18. Properties props = new Properties();
  19. props.put( "zookeeper.connect", "10.95.177.192:2181,10.95.97.175:2181,10.95.176.44:2181");
  20. props.put( "group.id", "group-1");
  21. //zk连接超时
  22. props.put( "zookeeper.session.timeout.ms", "400");
  23. props.put( "zookeeper.sync.time.ms", "200");
  24. props.put( "auto.commit.interval.ms", "1000");
  25. props.put( "auto.offset.reset", "smallest");
  26. //序列化类
  27. props.put( "serializer.class", "kafka.serializer.StringDecoder");
  28. ConsumerConfig config = new ConsumerConfig(props);
  29. consumerConnector = kafka.consumer.Consumer.createJavaConsumerConnector(config);
  30. }
  31. public void consume() {
  32. Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
  33. topicCountMap.put(KafkaProducerNew.TOPIC, new Integer( 1));
  34. StringDecoder keyDecoder = new StringDecoder( new VerifiableProperties());
  35. StringDecoder valueDecoder = new StringDecoder( new VerifiableProperties());
  36. Map<String, List<KafkaStream<String, String>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
  37. List<KafkaStream<String, String>> streams = consumerMap.get(TOPIC);
  38. for ( final KafkaStream stream : streams) {
  39. ConsumerIterator<String, String> it = stream.iterator();
  40. while (it.hasNext()) {
  41. System.out.println( "this is kafka consumer : " + new String( it.next().message().toString()) );
  42. }
  43. }
  44. }
  45. public static void main(String[] args) {
  46. new KafkaConsumerOld().consume();
  47. }
  48. }
2.3 新版本Producer

KafkaProducerNew.java:

  1. import org.apache.kafka.clients.producer.KafkaProducer;
  2. import org.apache.kafka.clients.producer.ProducerConfig;
  3. import org.apache.kafka.clients.producer.ProducerRecord;
  4. import org.apache.kafka.common.serialization.StringSerializer;
  5. import java.util.Properties;
  6. /**
  7. * producer: org.apache.kafka.clients.producer.KafkaProducer;
  8. * */
  9. public class KafkaProducerNew {
  10. private final KafkaProducer<String, String> producer;
  11. public final static String TOPIC = "didi-topic-test";
  12. private KafkaProducerNew() {
  13. Properties props = new Properties();
  14. props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.95.177.192:9092,10.95.97.175:9092");
  15. props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  16. props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  17. producer = new KafkaProducer<String, String>(props);
  18. }
  19. public void produce() {
  20. int messageNo = 1;
  21. final int COUNT = 10;
  22. while(messageNo < COUNT) {
  23. String key = String.valueOf(messageNo);
  24. String data = String.format( "hello KafkaProducer message %s", key);
  25. try {
  26. producer.send( new ProducerRecord<String, String>(TOPIC, data));
  27. } catch (Exception e) {
  28. e.printStackTrace();
  29. }
  30. messageNo++;
  31. }
  32. producer.close();
  33. }
  34. public static void main(String[] args) {
  35. new KafkaProducerNew().produce();
  36. }
  37. }
2.4 新版本Consumer:

KafkaConsumerNew.java:

  1. import org.apache.kafka.clients.consumer.*;
  2. import java.util.Arrays;
  3. import java.util.Properties;
  4. /**
  5. * consumer: org.apache.kafka.clients.consumer.Consumer
  6. * */
  7. public class KafkaConsumerNew {
  8. private Consumer<String, String> consumer;
  9. private static String group = "group-1";
  10. private static String TOPIC = "didi-topic-test";
  11. private KafkaConsumerNew() {
  12. Properties props = new Properties();
  13. props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.95.177.192:9092,10.95.97.175:9092");
  14. props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
  15. props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
  16. props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); // 自动commit
  17. props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); // 自动commit的间隔
  18. props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
  19. props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
  20. props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
  21. consumer = new KafkaConsumer<String, String>(props);
  22. }
  23. private void consume() {
  24. consumer.subscribe(Arrays.asList(TOPIC)); // 可消费多个topic,组成一个list
  25. while ( true) {
  26. ConsumerRecords<String, String> records = consumer.poll( 1000);
  27. for (ConsumerRecord<String, String> record : records) {
  28. System.out.printf( "offset = %d, key = %s, value = %s \n", record.offset(), record.key(), record.value());
  29. try {
  30. Thread.sleep( 1000);
  31. } catch (InterruptedException e) {
  32. e.printStackTrace();
  33. }
  34. }
  35. }
  36. }
  37. public static void main(String[] args) {
  38. new KafkaConsumerNew().consume();
  39. }
  40. }

Author:忆之独秀

Email:leaguenew@qq.com

注明出处:http://blog.csdn.net/lavorange/article/details/78970977

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值