Kafka架构进阶
Kafka-Eagle(鹰眼)
这是一个监视系统,监视您的kafka群集以及可视的使用者线程,偏移量,所有者等。当您安装Kafka Eagle时,用户可以看到当前的使用者组,对于每个组,他们正在消耗的Topic以及该组在每个主题中的偏移量,滞后,日志大小和位置。这对于了解用户从消息队列消耗的速度以及消息队列增加的速度很有用。
下载地址:https://codeload.github.com/smartloli/kafka-eagle-bin/tar.gz/v1.4.0
安装
[root@CentOS ~]# tar -zxf kafka-eagle-web-1.4.0-bin.tar.gz -C /usr/
[root@CentOS ~]# mv /usr/kafka-eagle-web-1.4.0 /usr/kafka-eagle
[root@CentOS ~]# vi .bashrc
KE_HOME=/usr/kafka-eagle
M2_HOME=/usr/apache-maven-3.6.3
SQOOP_HOME=/usr/sqoop-1.4.7
HIVE_HOME=/usr/apache-hive-1.2.2-bin
JAVA_HOME=/usr/java/latest
HADOOP_HOME=/usr/hadoop-2.9.2/
HBASE_HOME=/usr/hbase-1.2.4/
ZOOKEEPER_HOME=/usr/zookeeper-3.4.6
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin:$M2_HOME/bin:$HIVE_HOME/bin:$SQOOP_HOME/bin:$ZOOKEEPER_HOME/bin:$KE_HOME/bin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
export HADOOP_HOME
export HBASE_HOME
HBASE_CLASSPATH=$(/usr/hbase-1.2.4/bin/hbase classpath)
HADOOP_CLASSPATH=/root/mysql-connector-java-5.1.49.jar
export HADOOP_CLASSPATH
export M2_HOME
export HIVE_HOME
export SQOOP_HOME
export ZOOKEEPER_HOME
export KE_HOME
[root@CentOS ~]# source .bashrc
[root@CentOS ~]# cd /usr/kafka-eagle/
[root@CentOS kafka-eagle]# vi conf/system-config.properties
######################################
# multi zookeeper&kafka cluster list
######################################
kafka.eagle.zk.cluster.alias=cluster1
cluster1.zk.list=CentOS:2181
######################################
# zk client thread limit
######################################
kafka.zk.limit.size=25
######################################
# kafka eagle webui port
######################################
kafka.eagle.webui.port=8048
######################################
# kafka offset storage
######################################
cluster1.kafka.eagle.offset.storage=kafka
######################################
# kafka metrics, 30 days by default
######################################
kafka.eagle.metrics.charts=true
kafka.eagle.metrics.retain=30
######################################
# kafka sql topic records max
######################################
kafka.eagle.sql.topic.records.max=5000
kafka.eagle.sql.fix.error=false
######################################
# delete kafka topic token
######################################
kafka.eagle.topic.token=keadmin
######################################
# kafka sasl authenticate
######################################
cluster1.kafka.eagle.sasl.enable=false
cluster1.kafka.eagle.sasl.protocol=SASL_PLAINTEXT
cluster1.kafka.eagle.sasl.mechanism=SCRAM-SHA-256
cluster1.kafka.eagle.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka" password="kafka-eagle";
cluster1.kafka.eagle.sasl.client.id=
######################################
# kafka sqlite jdbc driver address
######################################
#kafka.eagle.driver=org.sqlite.JDBC
#kafka.eagle.url=jdbc:sqlite:/hadoop/kafka-eagle/db/ke.db
#kafka.eagle.username=root
#kafka.eagle.password=www.kafka-eagle.org
######################################
# kafka mysql jdbc driver address
######################################
kafka.eagle.driver=com.mysql.jdbc.Driver
kafka.eagle.url=jdbc:mysql://CentOS:3306/ke?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull
kafka.eagle.username=root
kafka.eagle.password=root
[root@CentOS kafka-eagle]# chmod u+x bin/ke.sh
如果需要检测Kafka性能指标需要修改Kafka启动文件
[root@CentOS ~]# cd /usr/kafka_2.11-2.2.0/
[root@CentOS kafka_2.11-2.2.0]# vi bin/kafka-server-start.sh
...
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
export JMX_PORT="9999"
#export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
fi
重启Kafka服务使上述JMX配置生效:
先执行 kafka-server-stop.sh 关闭Kafka服务,再重新启动Kafka。
最后启动Kafka-Eagle服务:
[root@CentOS kafka-eagle]# ./bin/ke.sh start
Flume&Kafka
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = CentOS
a1.sources.r1.port = 44444
# Describe the sink
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = topic01
a1.sinks.k1.kafka.bootstrap.servers = CentOSA:9092,CentOSB:9092,CentOSC:9092
a1.sinks.k1.kafka.flumeBatchSize = 20
a1.sinks.k1.kafka.producer.acks = -1
a1.sinks.k1.kafka.producer.linger.ms = 100
a1.sinks.k1.kafka.producer.compression.type = snappy
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Spring Boot & Kafka
- 引入依赖
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.1.5.RELEASE</version>
</parent>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
<!--测试-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
@SpringBootTest(classes = {KafkaSpringBootApplication.class})
@RunWith(SpringRunner.class)
public class KafkaTempolateTests {

    // Transactional template configured via spring.kafka.producer.transaction-id-prefix.
    @Autowired
    private KafkaTemplate kafkaTemplate;

    @Autowired
    private IOrderService orderService;

    /** Saves a demo order; the @Transactional service publishes it to topic04. */
    @Test
    public void testOrderService(){
        orderService.saveOrder("001","Dora edu ");
    }

    /** Sends a single record to topic01 inside a local Kafka transaction. */
    @Test
    public void testKafkaTemplate(){
        // Lambda form of KafkaOperations.OperationsCallback — same single send as before.
        kafkaTemplate.executeInTransaction(operations ->
                operations.send(new ProducerRecord("topic01","002","this is a demo")));
    }
}
spring.kafka.bootstrap-servers=CentOSA:9092,CentOSB:9092,CentOSC:9092
spring.kafka.producer.retries=5
spring.kafka.producer.acks=all
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.properties.enable.idempotence=true
spring.kafka.producer.transaction-id-prefix=transaction-id-
spring.kafka.consumer.group-id=group1
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=100
spring.kafka.consumer.properties.isolation.level=read_committed
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
// Boot entry point; @EnableKafka activates @KafkaListener processing and
// @EnableKafkaStreams the Streams infrastructure (no KStream topology is shown here).
@SpringBootApplication
@EnableKafkaStreams
@EnableKafka
public class KafkaSpringBootApplication {
public static void main(String[] args) throws IOException {
SpringApplication.run(KafkaSpringBootApplication.class,args);
// Blocks the main thread on stdin so the process stays alive for the listeners.
System.in.read();
}
// Consumes records from topic04 and forwards the returned string to topic05
// via @SendTo (reply uses the auto-configured KafkaTemplate).
@KafkaListeners(value = {@KafkaListener(topics = {"topic04"})})
@SendTo(value = {"topic05"})
public String listenner(ConsumerRecord<?, ?> cr) {
return cr.value()+" Dora edu";
}
}
// Class-level @Transactional: each saveOrder call runs inside a Kafka
// transaction started by the transactional KafkaTemplate.
@Transactional
@Service
public class OrderService implements IOrderService {

    @Autowired
    private KafkaTemplate kafkaTemplate;

    /** Publishes the order keyed by id to topic04. */
    @Override
    public void saveOrder(String id,Object message) {
        // send(topic, key, value) builds the same ProducerRecord("topic04", id, message) internally.
        kafkaTemplate.send("topic04", id, message);
    }
}
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.properties.enable.idempotence=true
spring.kafka.producer.transaction-id-prefix=transaction-id-
spring.kafka.consumer.group-id=group1
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=100
spring.kafka.consumer.properties.isolation.level=read_committed
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
// Boot entry point; @EnableKafka activates @KafkaListener processing and
// @EnableKafkaStreams the Streams infrastructure (no KStream topology is shown here).
@SpringBootApplication
@EnableKafkaStreams
@EnableKafka
public class KafkaSpringBootApplication {
public static void main(String[] args) throws IOException {
SpringApplication.run(KafkaSpringBootApplication.class,args);
// Blocks the main thread on stdin so the process stays alive for the listeners.
System.in.read();
}
// Consumes records from topic04 and forwards the returned string to topic05
// via @SendTo (reply uses the auto-configured KafkaTemplate).
@KafkaListeners(value = {@KafkaListener(topics = {"topic04"})})
@SendTo(value = {"topic05"})
public String listenner(ConsumerRecord<?, ?> cr) {
return cr.value()+" Dora edu";
}
}
// Class-level @Transactional: each saveOrder call participates in a Kafka
// transaction driven by the transactional KafkaTemplate.
@Transactional
@Service
public class OrderService implements IOrderService {
@Autowired
private KafkaTemplate kafkaTemplate;
// Publishes the order keyed by id to topic04.
@Override
public void saveOrder(String id,Object message) {
kafkaTemplate.send(new ProducerRecord("topic04",id,message));
}
}