[Message Middleware] Kafka Single-Node Deployment and Usage

Single-Node Installation and Deployment

Download:

Index of /dist/kafka (apache.org): https://archive.apache.org/dist/kafka/
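For reference, a minimal sketch of fetching the 3.2.0 release used below from the Apache archive (any Kafka mirror works equally well):

# download the Scala 2.13 / Kafka 3.2.0 binary release
wget https://archive.apache.org/dist/kafka/3.2.0/kafka_2.13-3.2.0.tgz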

[root@VM-12-4-centos kafka]# tar -zxvf kafka_2.13-3.2.0.tgz 
[root@VM-12-4-centos kafka]# tree
.
├── kafka_2.13-3.2.0
│   ├── bin
│   │   ├── connect-distributed.sh
│   │   ├── connect-mirror-maker.sh
│   │   ├── connect-standalone.sh
│   │   ├── kafka-acls.sh
│   │   ├── kafka-broker-api-versions.sh
│   │   ├── kafka-cluster.sh
│   │   ├── kafka-configs.sh
│   │   ├── kafka-console-consumer.sh
│   │   ├── kafka-console-producer.sh
│   │   ├── kafka-consumer-groups.sh
│   │   ├── kafka-consumer-perf-test.sh
│   │   ├── kafka-delegation-tokens.sh
│   │   ├── kafka-delete-records.sh
│   │   ├── kafka-dump-log.sh
│   │   ├── kafka-features.sh
│   │   ├── kafka-get-offsets.sh
│   │   ├── kafka-leader-election.sh
│   │   ├── kafka-log-dirs.sh
│   │   ├── kafka-metadata-shell.sh
│   │   ├── kafka-mirror-maker.sh
│   │   ├── kafka-producer-perf-test.sh
│   │   ├── kafka-reassign-partitions.sh
│   │   ├── kafka-replica-verification.sh
│   │   ├── kafka-run-class.sh
│   │   ├── kafka-server-start.sh
│   │   ├── kafka-server-stop.sh
│   │   ├── kafka-storage.sh
│   │   ├── kafka-streams-application-reset.sh
│   │   ├── kafka-topics.sh
│   │   ├── kafka-transactions.sh
│   │   ├── kafka-verifiable-consumer.sh
│   │   ├── kafka-verifiable-producer.sh
│   │   ├── trogdor.sh
│   │   ├── windows
│   │   │   ├── connect-distributed.bat
│   │   │   ├── connect-standalone.bat
│   │   │   ├── kafka-acls.bat
│   │   │   ├── kafka-broker-api-versions.bat
│   │   │   ├── kafka-configs.bat
│   │   │   ├── kafka-console-consumer.bat
│   │   │   ├── kafka-console-producer.bat
│   │   │   ├── kafka-consumer-groups.bat
│   │   │   ├── kafka-consumer-perf-test.bat
│   │   │   ├── kafka-delegation-tokens.bat
│   │   │   ├── kafka-delete-records.bat
│   │   │   ├── kafka-dump-log.bat
│   │   │   ├── kafka-get-offsets.bat
│   │   │   ├── kafka-leader-election.bat
│   │   │   ├── kafka-log-dirs.bat
│   │   │   ├── kafka-mirror-maker.bat
│   │   │   ├── kafka-producer-perf-test.bat
│   │   │   ├── kafka-reassign-partitions.bat
│   │   │   ├── kafka-replica-verification.bat
│   │   │   ├── kafka-run-class.bat
│   │   │   ├── kafka-server-start.bat
│   │   │   ├── kafka-server-stop.bat
│   │   │   ├── kafka-storage.bat
│   │   │   ├── kafka-streams-application-reset.bat
│   │   │   ├── kafka-topics.bat
│   │   │   ├── kafka-transactions.bat
│   │   │   ├── zookeeper-server-start.bat
│   │   │   ├── zookeeper-server-stop.bat
│   │   │   └── zookeeper-shell.bat
│   │   ├── zookeeper-security-migration.sh
│   │   ├── zookeeper-server-start.sh
│   │   ├── zookeeper-server-stop.sh
│   │   └── zookeeper-shell.sh
│   ├── config
│   │   ├── connect-console-sink.properties
│   │   ├── connect-console-source.properties
│   │   ├── connect-distributed.properties
│   │   ├── connect-file-sink.properties
│   │   ├── connect-file-source.properties
│   │   ├── connect-log4j.properties
│   │   ├── connect-mirror-maker.properties
│   │   ├── connect-standalone.properties
│   │   ├── consumer.properties
│   │   ├── kraft
│   │   │   ├── broker.properties
│   │   │   ├── controller.properties
│   │   │   ├── README.md
│   │   │   └── server.properties
│   │   ├── log4j.properties
│   │   ├── producer.properties
│   │   ├── server.properties
│   │   ├── tools-log4j.properties
│   │   ├── trogdor.conf
│   │   └── zookeeper.properties
│   ├── libs
│   │   ├── activation-1.1.1.jar
│   │   ├── aopalliance-repackaged-2.6.1.jar
│   │   ├── argparse4j-0.7.0.jar
│   │   ├── audience-annotations-0.5.0.jar
│   │   ├── commons-cli-1.4.jar
│   │   ├── commons-lang3-3.8.1.jar
│   │   ├── connect-api-3.2.0.jar
│   │   ├── connect-basic-auth-extension-3.2.0.jar
│   │   ├── connect-file-3.2.0.jar
│   │   ├── connect-json-3.2.0.jar
│   │   ├── connect-mirror-3.2.0.jar
│   │   ├── connect-mirror-client-3.2.0.jar
│   │   ├── connect-runtime-3.2.0.jar
│   │   ├── connect-transforms-3.2.0.jar
│   │   ├── hk2-api-2.6.1.jar
│   │   ├── hk2-locator-2.6.1.jar
│   │   ├── hk2-utils-2.6.1.jar
│   │   ├── jackson-annotations-2.12.6.jar
│   │   ├── jackson-core-2.12.6.jar
│   │   ├── jackson-databind-2.12.6.1.jar
│   │   ├── jackson-dataformat-csv-2.12.6.jar
│   │   ├── jackson-datatype-jdk8-2.12.6.jar
│   │   ├── jackson-jaxrs-base-2.12.6.jar
│   │   ├── jackson-jaxrs-json-provider-2.12.6.jar
│   │   ├── jackson-module-jaxb-annotations-2.12.6.jar
│   │   ├── jackson-module-scala_2.13-2.12.6.jar
│   │   ├── jakarta.activation-api-1.2.1.jar
│   │   ├── jakarta.annotation-api-1.3.5.jar
│   │   ├── jakarta.inject-2.6.1.jar
│   │   ├── jakarta.validation-api-2.0.2.jar
│   │   ├── jakarta.ws.rs-api-2.1.6.jar
│   │   ├── jakarta.xml.bind-api-2.3.2.jar
│   │   ├── javassist-3.27.0-GA.jar
│   │   ├── javax.servlet-api-3.1.0.jar
│   │   ├── javax.ws.rs-api-2.1.1.jar
│   │   ├── jaxb-api-2.3.0.jar
│   │   ├── jersey-client-2.34.jar
│   │   ├── jersey-common-2.34.jar
│   │   ├── jersey-container-servlet-2.34.jar
│   │   ├── jersey-container-servlet-core-2.34.jar
│   │   ├── jersey-hk2-2.34.jar
│   │   ├── jersey-server-2.34.jar
│   │   ├── jetty-client-9.4.44.v20210927.jar
│   │   ├── jetty-continuation-9.4.44.v20210927.jar
│   │   ├── jetty-http-9.4.44.v20210927.jar
│   │   ├── jetty-io-9.4.44.v20210927.jar
│   │   ├── jetty-security-9.4.44.v20210927.jar
│   │   ├── jetty-server-9.4.44.v20210927.jar
│   │   ├── jetty-servlet-9.4.44.v20210927.jar
│   │   ├── jetty-servlets-9.4.44.v20210927.jar
│   │   ├── jetty-util-9.4.44.v20210927.jar
│   │   ├── jetty-util-ajax-9.4.44.v20210927.jar
│   │   ├── jline-3.21.0.jar
│   │   ├── jopt-simple-5.0.4.jar
│   │   ├── jose4j-0.7.9.jar
│   │   ├── kafka_2.13-3.2.0.jar
│   │   ├── kafka-clients-3.2.0.jar
│   │   ├── kafka-log4j-appender-3.2.0.jar
│   │   ├── kafka-metadata-3.2.0.jar
│   │   ├── kafka-raft-3.2.0.jar
│   │   ├── kafka-server-common-3.2.0.jar
│   │   ├── kafka-shell-3.2.0.jar
│   │   ├── kafka-storage-3.2.0.jar
│   │   ├── kafka-storage-api-3.2.0.jar
│   │   ├── kafka-streams-3.2.0.jar
│   │   ├── kafka-streams-examples-3.2.0.jar
│   │   ├── kafka-streams-scala_2.13-3.2.0.jar
│   │   ├── kafka-streams-test-utils-3.2.0.jar
│   │   ├── kafka-tools-3.2.0.jar
│   │   ├── lz4-java-1.8.0.jar
│   │   ├── maven-artifact-3.8.4.jar
│   │   ├── metrics-core-2.2.0.jar
│   │   ├── metrics-core-4.1.12.1.jar
│   │   ├── netty-buffer-4.1.73.Final.jar
│   │   ├── netty-codec-4.1.73.Final.jar
│   │   ├── netty-common-4.1.73.Final.jar
│   │   ├── netty-handler-4.1.73.Final.jar
│   │   ├── netty-resolver-4.1.73.Final.jar
│   │   ├── netty-tcnative-classes-2.0.46.Final.jar
│   │   ├── netty-transport-4.1.73.Final.jar
│   │   ├── netty-transport-classes-epoll-4.1.73.Final.jar
│   │   ├── netty-transport-native-epoll-4.1.73.Final.jar
│   │   ├── netty-transport-native-unix-common-4.1.73.Final.jar
│   │   ├── osgi-resource-locator-1.0.3.jar
│   │   ├── paranamer-2.8.jar
│   │   ├── plexus-utils-3.3.0.jar
│   │   ├── reflections-0.9.12.jar
│   │   ├── reload4j-1.2.19.jar
│   │   ├── rocksdbjni-6.29.4.1.jar
│   │   ├── scala-collection-compat_2.13-2.6.0.jar
│   │   ├── scala-java8-compat_2.13-1.0.2.jar
│   │   ├── scala-library-2.13.8.jar
│   │   ├── scala-logging_2.13-3.9.4.jar
│   │   ├── scala-reflect-2.13.8.jar
│   │   ├── slf4j-api-1.7.36.jar
│   │   ├── slf4j-reload4j-1.7.36.jar
│   │   ├── snappy-java-1.1.8.4.jar
│   │   ├── trogdor-3.2.0.jar
│   │   ├── zookeeper-3.6.3.jar
│   │   ├── zookeeper-jute-3.6.3.jar
│   │   └── zstd-jni-1.5.2-1.jar
│   ├── LICENSE
│   ├── licenses
│   │   ├── argparse-MIT
│   │   ├── CDDL+GPL-1.1
│   │   ├── DWTFYWTPL
│   │   ├── eclipse-distribution-license-1.0
│   │   ├── eclipse-public-license-2.0
│   │   ├── jline-BSD-3-clause
│   │   ├── jopt-simple-MIT
│   │   ├── paranamer-BSD-3-clause
│   │   ├── slf4j-MIT
│   │   └── zstd-jni-BSD-2-clause
│   ├── NOTICE
│   └── site-docs
│       └── kafka_2.13-3.2.0-site-docs.tgz
└── kafka_2.13-3.2.0.tgz
[root@VM-12-4-centos kafka]# cd kafka_2.13-3.2.0/config/
[root@VM-12-4-centos config]# vim zookeeper.properties 


# the directory where the snapshot is stored.
dataDir=/data/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
# admin.serverPort=8080
[root@VM-12-4-centos config]# vim server.properties


# For external access, listeners must be configured; it defaults to the local host IP, port 9092


log.dirs=/data/tmp/kafka-logs
############################# Socket Server Settings #############################

# The address the socket server listens on. If not configured, the host name will be equal to the value of
# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://<internal-ip>:9092

# Listener name, hostname and port the broker will advertise to clients.
# If not set, it uses the value for "listeners".
#advertised.listeners=PLAINTEXT://your.host.name:9092
#advertised.host.name=<public-ip-or-domain>
advertised.listeners=PLAINTEXT://<public-ip-or-domain>:9092


zookeeper.connect=localhost:2181
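
Optionally pre-create the two data directories referenced above; both services can usually create them on first start, but doing it up front avoids permission surprises:

mkdir -p /data/tmp/zookeeper /data/tmp/kafka-logs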

# Start ZooKeeper
[root@VM-12-4-centos bin]# ./zookeeper-server-start.sh -daemon ../config/zookeeper.properties 

# Start Kafka
[root@VM-12-4-centos bin]# ./kafka-server-start.sh -daemon ../config/server.properties
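
Then verify the broker with the bundled console tools. A minimal smoke test, assuming the listener is reachable as localhost:9092 (substitute the internal IP configured above if not); run the producer and consumer in separate terminals:

# confirm the ZooKeeper and Kafka JVMs are up
jps | grep -E 'QuorumPeerMain|Kafka'

# create a throwaway topic
./kafka-topics.sh --bootstrap-server localhost:9092 --create --topic test-topic --partitions 1 --replication-factor 1

# produce a few lines (Ctrl+C to exit) ...
./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test-topic

# ... and read them back from the beginning
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test-topic --from-beginning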

# Change the default heap in kafka-server-start.sh from KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" to 256M / 128M as below
if [ $# -lt 1 ];
then
        echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
        exit 1
fi
base_dir=$(dirname $0)

if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
fi

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx256M -Xms128M"
fi

EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}

COMMAND=$1
case $COMMAND in
  -daemon)
    EXTRA_ARGS="-daemon "$EXTRA_ARGS
    shift
    ;;
  *)
    ;;
esac

exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
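
Editing the script is one option; because of the if [ "x$KAFKA_HEAP_OPTS" = "x" ] guard above, the script only applies its default when the variable is unset, so the heap can also be overridden per invocation without touching the file:

KAFKA_HEAP_OPTS="-Xmx256M -Xms128M" ./kafka-server-start.sh -daemon ../config/server.properties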

LogiKM is a one-stop Apache Kafka cluster metrics monitoring and operations platform; it is worth a try if you are interested:

GitHub - didi/LogiKM: https://github.com/didi/LogiKM

EFAK (Kafka Eagle): https://www.kafka-eagle.org/

Sample Code

Project download:

Message queue demo (Spring Boot + Kafka + ActiveMQ) - CSDN resource library

pom.xml

<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.11</version>
        <scope>test</scope>
    </dependency>
</dependencies>

application.yml

server:
  port: 8080

spring:
  kafka:
    bootstrap-servers: ip:port
    producer:
      retries: 3 # a value greater than 0 makes the client resend records that failed to send
      batch-size: 16384
      buffer-memory: 33554432
      acks: 1
      # serializers for the message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: default-group
      enable-auto-commit: false
      auto-offset-reset: earliest
      # deserializer for the message key
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # deserializer for the message value
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      ack-mode: manual_immediate
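
The demo below produces to and consumes from a topic literally named "topic". Depending on the broker's auto.create.topics.enable setting it may be created automatically on first use, but creating it explicitly with a known partition count is safer; a sketch:

./kafka-topics.sh --bootstrap-server localhost:9092 --create --topic topic --partitions 1 --replication-factor 1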

Application

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class JmsMessageApplication
{
    public static void main( String[] args )
    {
        SpringApplication.run(JmsMessageApplication.class);
    }
}
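
Assuming the project declares the spring-boot-maven-plugin (not shown in the pom snippet above), the application can also be started from the project root with:

mvn spring-boot:run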

KafkaConsumer

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumer {

    @KafkaListener(topics = "topic", groupId = "group_one")
    public void listenGroupOne(ConsumerRecord<String, String> record, Acknowledgment ack) {
        String value = record.value();
        System.out.println("group_one message: " + value);
        System.out.println("group_one record: " + record);
        ack.acknowledge();
    }

    @KafkaListener(topics = "topic", groupId = "group_two")
    public void listenGroupTwo(ConsumerRecord<String, String> record, Acknowledgment ack) {
        String value = record.value();
        System.out.println("group_two message: " + value);
        System.out.println("group_two record: " + record);
        ack.acknowledge();
    }

    // no groupId given: uses the default group-id (default-group) from application.yml
    @KafkaListener(topics = "topic")
    public void listenConsumer(ConsumerRecord<String, String> record, Acknowledgment ack) {
        String value = record.value();
        System.out.println("consumer message: " + value);
        System.out.println("consumer record: " + record);
        ack.acknowledge();
    }
}
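
Once both groups are consuming, their committed offsets and lag can be inspected with the bundled tool, for example:

./kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group group_one
./kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group group_two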

RequestController

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class RequestController {
    private final static String TOPIC_NAME = "topic";

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @RequestMapping("/produce")
    public String send(@RequestParam("msg") String msg) {
        kafkaTemplate.send(TOPIC_NAME, "key", msg);
        return String.format("Message %s sent successfully!", msg);
    }
}

Start JmsMessageApplication, call the /produce endpoint to send a message, let the consumers pick it up, and check the console logs.
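For example, from the command line (port 8080 per application.yml):

curl "http://localhost:8080/produce?msg=hello-kafka"

Each @KafkaListener group above should then log the message once.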
