一、Kafka Eagle
1.1、Eagle安装
1)修改 kafka 启动命令
修改 kafka-server-start.sh 命令:
注意:修改之后在启动 Kafka 之前要分发至其他节点
2)上传压缩包 kafka-eagle-bin-1.3.7.tar.gz 到集群并解压
3)给启动文件执行权限
4)修改环境变量
5)修改配置文件system-config.properties
######################################
# multi zookeeper&kafka cluster list
######################################
kafka.eagle.zk.cluster.alias=cluster1
cluster1.zk.list=hadoop102:2181,hadoop103:2181,hadoop104:2181
######################################
# kafka offset storage
######################################
cluster1.kafka.eagle.offset.storage=kafka
######################################
# enable kafka metrics
######################################
kafka.eagle.metrics.charts=true
kafka.eagle.sql.fix.error=false
######################################
# kafka jdbc driver address
######################################
kafka.eagle.driver=com.mysql.jdbc.Driver
kafka.eagle.url=jdbc:mysql://hadoop102:3306/ke?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull
kafka.eagle.username=root
kafka.eagle.password=root
6)使用docker安装mysql
安装docker:
安装mysql:
创建数据库:
7)启动
注意:启动之前需要先启动 ZK 以及 KAFKA
1.2、Eagle简单使用
1)启动消费者
package com.dianchou.kafka.consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.Properties;
/**
 * Minimal Kafka consumer used to generate consumer-group activity that can be
 * observed in the Kafka Eagle UI. Subscribes to the "group-test" topic and
 * prints every record's offset, key and value forever (terminate with Ctrl-C).
 *
 * @author lawrence
 * @create 2021-02-05
 */
public class MyConsumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Cluster to connect to
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        // Enable automatic offset commits
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        // Auto-commit interval (ms)
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Key/value deserializers (both plain strings)
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Consumer group id (shown in Eagle's consumer view)
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group-test");
        // Create the consumer with explicit String/String type parameters
        // (the original used raw types, losing compile-time type safety)
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // Subscribe to the topic (named "group-test", same as the group id)
        consumer.subscribe(Collections.singletonList("group-test"));
        // Poll loop: fetch and print records indefinitely
        while (true) {
            // poll(Duration) replaces the deprecated poll(long), which was
            // removed in Kafka 2.0; semantics for a 100 ms timeout are the same
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(100));
            // Print each record's metadata and payload
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.printf("offset = %d, key = %s, value = %s%n", consumerRecord.offset(), consumerRecord.key(), consumerRecord.value());
            }
        }
    }
}
2)eagle中查看topic及消费者
3)启动生产者
package com.dianchou.kafka.producer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
/**
 * Minimal Kafka producer used to generate topic traffic that can be observed
 * in the Kafka Eagle UI. Sends 10000 string records to the "group-test" topic
 * synchronously, then closes the producer.
 *
 * @author lawrence
 * @create 2021-02-04
 */
public class MyProducer {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // Producer configuration; use ProducerConfig constants consistently
        // (the original mixed raw string keys with constants)
        Properties properties = new Properties();
        // Cluster to connect to
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        // Ack level: wait for full ISR acknowledgement
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        // Retry count on transient send failures
        properties.put(ProducerConfig.RETRIES_CONFIG, 3);
        // Batch size: 16 KB
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // Linger time: 1 ms before sending a partially-filled batch
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // RecordAccumulator buffer size: 32 MB
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // Key/value serializers (both plain strings)
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // Create the producer with explicit String/String type parameters
        // (the original used raw types, losing compile-time type safety)
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        for (int i = 0; i < 10000; i++) {
            // No callback; .get() blocks until the broker acks each record,
            // making the send synchronous (fine for a demo, slow in production)
            producer.send(new ProducerRecord<>("group-test", Integer.toString(i), Integer.toString(i))).get();
        }
        // Flush remaining records and release network resources
        producer.close();
    }
}
4)查看
5)Brokers监控
6)Kafka监控
7)zookeeper
8)topic