写在最前
目前很多 Java 技术都需要 Linux 环境，真是让人不得不妥协啊……
感觉学习需要分为三步
1. kafka原理
2. kafka环境搭建(需要Linux环境,搭建集群)
3. kafka代码
今天写下第三步
kafka代码
pom文件
<!-- Maven build descriptor for the Kafka demo project. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.wk</groupId>
<artifactId>kafka-01</artifactId>
<version>0.0.1-SNAPSHOT</version>
<properties>
<!-- Compile sources as UTF-8 so Chinese comments survive the build. -->
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<!-- Kafka server/core artifact (the "_2.12" suffix is the Scala version it was
     built against). Needed here for the old kafka.javaapi SimpleConsumer classes. -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.12</artifactId>
<version>0.10.2.1</version>
</dependency>
<!-- Pure-Java client library: KafkaProducer / KafkaConsumer.
     NOTE(review): keep this version in lockstep with kafka_2.12 above. -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.10.2.1</version>
</dependency>
<!-- Unit-testing support only; not packaged. -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.10</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
生产者(Producer)
package cn.siggy.test;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import cn.siggy.kafka.KafkaProperties;
/**
 * Demo producer: sends 999 numbered messages to a Kafka topic, either
 * asynchronously (with a {@link DemoCallBack}) or synchronously via get().
 */
public class producerTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker address and a client id that shows up in broker-side metrics/logs.
        props.put("bootstrap.servers", KafkaProperties.KAFKA_SERVER_URL + ":" + KafkaProperties.KAFKA_SERVER_PORT);
        props.put("client.id", "DemoProducer");
        // Serializers for the Integer key and String value of each record.
        props.put("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        int messageNo = 1;
        // FIX: the original used an empty topic name (""), which Kafka rejects —
        // no message could ever be delivered. Use the topic the consumer reads.
        // TODO(review): confirm "test2" matches the topic created on the cluster.
        String topic = "test2";
        // Primitive boolean instead of boxed Boolean for a simple local flag.
        boolean isAsync = true;
        // try-with-resources: the original never closed the producer, leaking
        // its network threads/buffers and possibly dropping unflushed records.
        try (KafkaProducer<Integer, String> producer = new KafkaProducer<>(props)) {
            while (messageNo < 1000) {
                String messageStr = "Message_" + messageNo;
                long startTime = System.currentTimeMillis();
                if (isAsync) {
                    // Asynchronous send: returns immediately, the broker ack is
                    // delivered to the callback.
                    producer.send(new ProducerRecord<>(topic, messageNo, messageStr),
                            new DemoCallBack(startTime, messageNo, messageStr));
                } else {
                    // Synchronous send: get() blocks until the broker acknowledges.
                    try {
                        RecordMetadata rm = producer.send(new ProducerRecord<>(topic, messageNo, messageStr)).get();
                        System.out.println("Sent message: (" + messageNo + ", " + messageStr + ")");
                        System.out.println("partition:" + rm.partition() + "---offset:" + rm.offset());
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag instead of swallowing it.
                        Thread.currentThread().interrupt();
                        e.printStackTrace();
                    } catch (ExecutionException e) {
                        e.printStackTrace();
                    }
                }
                ++messageNo;
            }
        }
    }
}
/**
* 回调类
* @ClassName DemoCallBack
* @Description
* @Author wk
* @Date 2018年3月11日上午9:00:17
*/
class DemoCallBack implements Callback {
private final long startTime;
private final int key;
private final String message;
public DemoCallBack(long startTime, int key, String message) {
this.startTime = startTime;
this.key = key;
this.message = message;
}
/**
* A callback method the user can implement to provide asynchronous handling of
* request completion. This method will be called when the record sent to the
* server has been acknowledged. Exactly one of the arguments will be non-null.
*
* @param metadata
* The metadata for the record that was sent (i.e. the partition and
* offset). Null if an error occurred.
* @param exception
* The exception thrown during processing of this record. Null if no
* error occurred.
*/
public void onCompletion(RecordMetadata metadata, Exception exception) {
long elapsedTime = System.currentTimeMillis() - startTime;
if (metadata != null) {
System.out.println("message(" + key + ", " + message + ") sent to partition(" + metadata.partition() + "), "
+ "offset(" + metadata.offset() + ") in " + elapsedTime + " ms");
} else {
exception.printStackTrace();
}
}
}
消费者(Consumer)
package cn.siggy.kafka;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;
/**
 * Demo consumer using the legacy low-level SimpleConsumer API: fetches one
 * batch from a fixed topic/partition/offset and prints the payloads.
 */
public class ConsumerTest {
    /**
     * Prints each message payload in the set as a UTF-8 string.
     *
     * @param messageSet batch returned by a fetch request
     * @throws UnsupportedEncodingException never in practice (UTF-8 is mandatory)
     */
    private static void printMessages(ByteBufferMessageSet messageSet) throws UnsupportedEncodingException {
        for (MessageAndOffset messageAndOffset : messageSet) {
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            System.out.println(new String(bytes, "UTF-8") + "================");
        }
    }

    public static void main(String[] args) throws UnsupportedEncodingException {
        // KAFKA_SERVER_URL / KAFKA_SERVER_PORT       broker address
        // KAFKA_PRODUCER_BUFFER_SIZE                 fetch buffer size
        // CONNECTION_TIMEOUT                         socket timeout (ms)
        // (the unused local TOPIC2 from the original was removed; the topic
        //  actually used is KafkaProperties.TOPIC2)
        SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.KAFKA_SERVER_URL,
                KafkaProperties.KAFKA_SERVER_PORT, KafkaProperties.CONNECTION_TIMEOUT,
                KafkaProperties.KAFKA_PRODUCER_BUFFER_SIZE, KafkaProperties.CLIENT_ID);
        try {
            System.out.println("Testing single fetch");
            // Fetch from topic KafkaProperties.TOPIC2, partition 1, offset 0, up to 100 bytes.
            FetchRequest req = new FetchRequestBuilder()
                    .clientId(KafkaProperties.CLIENT_ID)
                    .addFetch(KafkaProperties.TOPIC2, 1, 0L, 100).build();
            FetchResponse fetchResponse = simpleConsumer.fetch(req);
            // Print the fetched batch.
            printMessages(fetchResponse.messageSet(KafkaProperties.TOPIC2, 1));
        } finally {
            // FIX: the original never closed the consumer, leaking its network connection.
            simpleConsumer.close();
        }
    }
}