一、概述
- 环境版本和jar版本一致。
- 环境版本:kafka_2.10-0.8.2.2。
- jar版本:
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.8.2.2</version>
</dependency>
二、代码
1、生产者
package com.cfl.kafka;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
/**
* 生产者
* @author chenfenli
*
*/
public class Producer {
private static String broker = "192.168.1.103:9092";
private static String topics = "t0407";
private static KafkaProducer<String, String> kafkaProducer;
public static void main(String[] args) {
try {
Properties properties = new Properties();
// 地址
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, broker);
// 配置value的序列化类
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
// 配置key的序列化类
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
// 创建消息
kafkaProducer = new KafkaProducer<String, String>(properties);
ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topics, "hello123");
// 发送消息
Future<RecordMetadata> future = kafkaProducer.send(producerRecord);
System.out.println(future.get());
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace()
}
// 关闭
kafkaProducer.close();
}
}
2、消费者
package com.cfl.kafka;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
/**
 * Kafka consumer example using the old high-level (ZooKeeper-based)
 * consumer API shipped with 0.8.2.2. Reads messages from one topic in an
 * endless loop and commits offsets manually after each processed message.
 *
 * @author chenfenli
 */
public class Consumer {
    // ZooKeeper connect string (the old consumer coordinates via ZK, not brokers).
    private static String zookeeper = "192.168.1.103:2181";
    // Consumer group id.
    private static String groupId = "test6";
    // Topic to consume.
    private static String topic = "t0407";
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        // ZooKeeper connection.
        properties.put("zookeeper.connect", zookeeper);
        // NOTE(review): these two deserializer keys belong to the NEW consumer
        // API; the old ConsumerConnector ignores them (streams yield raw byte[]).
        // Kept for reference only.
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Consumer group.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // Partition assignment strategy: round robin.
        properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY, "roundrobin");
        // largest:  start from the latest offset.
        // smallest: start from the EARLIEST offset when this group has no
        //           committed offset yet (first consumption reads history).
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
        // BUG FIX: the old consumer's key is "auto.commit.enable", not the new
        // consumer's "enable.auto.commit", and the value must be a String —
        // kafka.consumer.ConsumerConfig reads Properties via getProperty(),
        // which returns null for non-String values. The original Boolean put
        // under the wrong key left auto-commit silently enabled (default true).
        properties.put("auto.commit.enable", "false");
        ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(properties));
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        // One stream (thread) per topic.
        topicCountMap.put(topic, 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = messageStreams.get(topic);
        while (true) {
            try {
                for (int i = 0; i < streams.size(); i++) {
                    ConsumerIterator<byte[], byte[]> iterator = streams.get(i).iterator();
                    while (iterator.hasNext()) {
                        // Decode explicitly as UTF-8 instead of the platform
                        // default charset.
                        String msg = new String(iterator.next().message(), "UTF-8");
                        System.out.println(msg);
                        // Manual commit after the message has been processed.
                        consumer.commitOffsets();
                    }
                }
            } catch (Exception e) {
                // Log and keep consuming; a poison message must not kill the loop.
                System.err.println(e);
            }
        }
    }
}