Kafka Example
1. Kafka is clustered messaging middleware and (in this version) depends on ZooKeeper, so set up a ZooKeeper environment and start it first.
2. Download Kafka and start a broker.
3. Run the producer, then the consumer. The topic can also be created up front, as sketched right after this list.
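Besides the kafka-topics.sh script that ships with Kafka, the topic can be created programmatically through the AdminClient API. A minimal sketch, assuming the same single local broker as the demos below (the class name CreateTopicDemo is ours):

package cn.com.demo;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateTopicDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        // try-with-resources closes the admin client's network resources
        try (AdminClient admin = AdminClient.create(props)) {
            // 1 partition, replication factor 1 -- enough for a single local broker
            NewTopic topic = new NewTopic("kafkaTopic", 1, (short) 1);
            admin.createTopics(Collections.singleton(topic)).all().get();
            System.out.println("Topic created: kafkaTopic");
        }
    }
}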
Code example:
First create a Maven project.
pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>cn.com</groupId>
    <artifactId>kafkademo</artifactId>
    <packaging>jar</packaging>
    <version>0.0.1-SNAPSHOT</version>
    <name>kafkademo</name>
    <properties>
        <!-- compile at Java 8 so the generics and lambda examples below work -->
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.12</artifactId>
            <version>1.1.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
    <build>
        <finalName>kafkademo</finalName>
    </build>
</project>
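One note on the dependency: kafka_2.12 is the full broker artifact. For an application that only produces and consumes, the lighter org.apache.kafka:kafka-clients artifact (same version) is sufficient, since it contains the KafkaProducer and KafkaConsumer classes used below.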
The producer, ProducerDemo.java:
package cn.com.demo;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.time.LocalDateTime;
import java.util.Properties;

public class ProducerDemo {
    // Topic to send to
    private static final String topic = "kafkaTopic";

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        /* Cluster: list of Kafka broker addresses */
        //props.put("bootstrap.servers", "192.168.1.120:9092,192.168.1.135:9093,192.168.1.227:9094");
        props.put("bootstrap.servers", "127.0.0.1:9092");
        /* Acknowledgment mode:
         * 0  = do not wait for any acknowledgment from the broker;
         * 1  = the leader replica acknowledges once it has written the record (the default);
         * -1 (or "all") = the leader acknowledges only after all in-sync replicas have the record. */
        props.put("acks", "1");
        /* A value greater than 0 makes the client resend any record whose send failed with a
         * potentially transient error. Note that retries can reorder records: if two records go
         * to the same partition and the first fails while the second succeeds, the second may end
         * up before the first. Setting max.in.flight.requests.per.connection=1 prevents this. */
        props.put("retries", "0");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Producer instance
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        int i = 1;
        // Send business messages (in practice these might come from a file,
        // an in-memory database, or a socket).
        System.out.println("Producer sending messages:");
        while (i < 10) {
            //Thread.sleep(1000);
            producer.send(new ProducerRecord<String, String>(topic, "key:" + i, "value:" + i));
            System.out.println(LocalDateTime.now() + " key:" + i + " value:" + i);
            i++;
        }
        // Close the producer so buffered records are flushed before the JVM exits.
        producer.close();
    }
}
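With acks=1 the broker only confirms that the leader has written the record, and the fire-and-forget send() above never inspects the result. To observe whether a send actually succeeded, you can block on the Future that send() returns or pass a Callback. A minimal sketch of the callback variant, reusing the broker address and topic from above (the class name ProducerWithCallbackDemo is ours):

package cn.com.demo;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class ProducerWithCallbackDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        props.put("acks", "1");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // try-with-resources: close() flushes and waits for outstanding sends
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("kafkaTopic", "key:1", "value:1"),
                    (metadata, exception) -> {
                        if (exception != null) {
                            // The send failed after any configured retries
                            exception.printStackTrace();
                        } else {
                            // Invoked once the broker has acknowledged the record
                            System.out.printf("acked: partition=%d, offset=%d%n",
                                    metadata.partition(), metadata.offset());
                        }
                    });
        }
    }
}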
The consumer, ConsumerDemo.java:
package cn.com.demo;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class ConsumerDemo {
    private static final String topic = "kafkaTopic";

    public static void main(String[] args) {
        Properties props = new Properties();
        // Cluster: list of Kafka broker addresses
        //props.put("bootstrap.servers", "192.168.1.120:9092,192.168.1.135:9093,192.168.1.227:9094");
        props.put("bootstrap.servers", "127.0.0.1:9092");
        // Uniquely identifies the consumer group this process belongs to
        props.put("group.id", "1111");
        // If true, consumed offsets are committed automatically in the background
        // (to Kafka's internal __consumer_offsets topic, not to ZooKeeper)
        props.put("enable.auto.commit", "true");
        // How often the consumer commits offsets
        props.put("auto.commit.interval.ms", "1000");
        // Where to start when the group has no committed offset: "earliest" reads from the beginning
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList(topic));
        System.out.println("Consumer receiving messages:");
        while (true) {
            // Block for up to 1000 ms waiting for new records
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            }
        }
    }
}
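Because enable.auto.commit=true commits offsets on a timer, a record that was polled but not yet fully processed can be marked consumed if the process crashes. For at-least-once processing, the usual alternative is to disable auto commit and call commitSync() after each batch has been handled. A minimal sketch of that variant (the class name ManualCommitConsumerDemo is ours):

package cn.com.demo;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class ManualCommitConsumerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        props.put("group.id", "1111");
        props.put("enable.auto.commit", "false"); // commit manually instead
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("kafkaTopic"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                // Process the record before its offset is committed
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
            // Commit the offsets of everything returned by the last poll()
            consumer.commitSync();
        }
    }
}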