1. 编写docker-compose文件 zk_kafka.yml
version: '2'

services:
  zookeeper:
    image: hyperledger/fabric-zookeeper:2.0
    expose:
      # Client port, reachable only from other containers on this network.
      - "2181"

  kafka:
    image: hyperledger/fabric-kafka:2.0
    depends_on:
      - zookeeper
    ports:
      # OUTSIDE listener, published to the host for external clients.
      - "9092:9092"
    expose:
      # INSIDE listener for broker-to-broker / in-network traffic only.
      - "9093"
    environment:
      # OUTSIDE must advertise the host's hostname (or real IP) so that
      # external clients can reach the broker after the bootstrap handshake.
      KAFKA_ADVERTISED_LISTENERS: "INSIDE://kafka:9093,OUTSIDE://appnode40:9092"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT"
      KAFKA_LISTENERS: "INSIDE://0.0.0.0:9093,OUTSIDE://0.0.0.0:9092"
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
注意:OUTSIDE 必须设置为容器所在宿主机的 hostname 或实际 IP 地址,否则外部客户端在 bootstrap 之后无法连回 broker。
2. 启动 docker kafka容器
docker-compose -f zk_kafka.yml up -d
3. 外部应用程序连接kafka
在java代码中,指定 appnode40:9092即可
/**
 * Console consumer demo: connects to the Kafka broker published by the
 * docker-compose setup above (the OUTSIDE listener at appnode40:9092)
 * and prints every record received on the "async" topic, forever.
 *
 * NOTE(review): assumes the machine running this JVM can resolve the
 * hostname "appnode40" -- confirm DNS or /etc/hosts on the client side.
 */
public static void main(String[] args) {
    int cnt = 0;
    String topic = "async";

    Properties props = new Properties();
    // Must match the OUTSIDE advertised listener in zk_kafka.yml.
    props.put("bootstrap.servers", "appnode40:9092");
    props.put("group.id", topic + "_group_id_00xx1");
    // Offsets are auto-committed once per second.
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        // poll(long) is deprecated since Kafka 2.0 (matching the 2.0 images
        // used above); the Duration overload honors the timeout even while
        // metadata is still being fetched.
        ConsumerRecords<String, String> records = consumer.poll(java.time.Duration.ofMillis(100));
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("%d --> partition = %d, offset = %d, key = %s, value = %s%n",
                    ++cnt, record.partition(), record.offset(), record.key(), record.value());
        }
    }
}