========
centos7 安装 kafka
========
# 要先安装 zookeeper 具体 安装步骤 看 博文 "centos7 64bit zookeeper 安装,使用,开机自启"
博文地址
https://blog.csdn.net/kisscatforever/article/details/86091136
# Create the install directory (-p: no error if it already exists)
mkdir -p /usr/local/kafka/
# Download the Kafka tarball into /usr/local/kafka/
curl -o /usr/local/kafka/kafka_2.11-2.1.1.tgz http://apache.fayea.com/kafka/2.1.1/kafka_2.11-2.1.1.tgz
# Extract into /usr/local/kafka/ (-C sets the target directory; without it —
# and without a prior cd — the archive would unpack into the current directory)
# z: filter through gzip, x: extract, v: verbose, f: archive file
tar -zxvf /usr/local/kafka/kafka_2.11-2.1.1.tgz -C /usr/local/kafka/
# Remove the downloaded archive (-f is enough: it is a plain file, not a directory)
rm -f /usr/local/kafka/kafka_2.11-2.1.1.tgz
# 备注 本机安装的 zookeeper 的 日志文件目录如下
cd /usr/local/zookeeper/apache-zookeeper-3.5.5-bin/dataDir
# 见 "centos 7 64位 安装 zookeeper 单机" 启动 zookeeper
# 查看 zookeeper 的状态
/usr/local/zookeeper/apache-zookeeper-3.5.5-bin/bin/zkServer.sh status
[root@iZuf6hyvanq21thxm114s6Z kafka_2.11-2.1.1]# /usr/local/zookeeper/apache-zookeeper-3.5.5-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/apache-zookeeper-3.5.5-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: standalone
# 查看 zookeeper 的进程
[root@iZuf6hyvanq21thxm114s6Z kafka_2.11-2.1.1]# ps -ef | grep zookeeper
# 备份一份 kafka 配置文件
cp /usr/local/kafka/kafka_2.11-2.1.1/config/server.properties /usr/local/kafka/kafka_2.11-2.1.1/config/server.properties.bak
# 修改配置文件
vi /usr/local/kafka/kafka_2.11-2.1.1/config/server.properties
+++++++++++++++++++++++
# Unique id of this broker; must differ across brokers in a cluster
broker.id=0
# Listening address; 172.18.10.245 is the host's private (intranet) IP
listeners=PLAINTEXT://172.18.10.245:9092
# Directory for broker log/data files (e.g. server.log, the Kafka startup log)
log.dirs=/usr/local/kafka/kafka_2.11-2.1.1/logs
# Public IP advertised to external clients (47.103.137.122 is the external IP).
# NOTE(review): advertised.host.name, host.name, advertised.port and port are
# legacy settings, deprecated in favor of listeners/advertised.listeners —
# confirm against the Kafka 2.1 broker configuration docs.
advertised.host.name=47.103.137.122
# ZooKeeper connection string (ip:port); separate multiple entries with commas
zookeeper.connect=172.18.10.245:2181
# Port advertised to external clients (legacy setting, see note above)
advertised.port=9092
# Private IP of the host this Kafka runs on (legacy setting, see note above)
host.name=172.18.10.245
# Port this Kafka host serves on (legacy setting, see note above)
port=9092
# Default partition count per new topic (Kafka's default is 1); the right value
# depends on the server's capacity
num.partitions=4
+++++++++++++++++++++++
# 用 nohup 在后台不挂断地运行命令, 启动 kafka
nohup /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-server-start.sh /usr/local/kafka/kafka_2.11-2.1.1/config/server.properties &
[root@iZuf6hyvanq21thxm114s6Z bin]# nohup /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-server-start.sh /usr/local/kafka/kafka_2.11-2.1.1/server.properties &
[1] 8394
[root@iZuf6hyvanq21thxm114s6Z bin]# nohup: ignoring input and appending output to ‘nohup.out’
# 查看是否启动成功
ps -ef | grep kafka
# 或 执行 jps 命令 看是否有 kafka
[root@iZuf6hyvanq21thxm114s6Z ~]# jps
2963 Bootstrap
2599 QuorumPeerMain
8775 Kafka
9273 Jps
#出现 kafka 的 进程 则 说明 已经启动成功
# 创建名为 yuchao_topic 主题, 2个分区,每个分区1个副本
/usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-topics.sh --create --zookeeper 172.18.10.245:2181 --replication-factor 1 --partitions 2 --topic yuchao_topic
[root@iZuf6hyvanq21thxm114s6Z ~]# /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-topics.sh --create --zookeeper 172.18.10.245:2181 --replication-factor 1 --partitions 2 --topic yuchao_topic
WARNING: Due to limitations in metric names, topics with a period ('.') or underscore ('_') could collide. To avoid issues it is best to use either, but not both.
Created topic "yuchao_topic".
# 查看 所有主题
/usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-topics.sh --zookeeper 172.18.10.245:2181 --list
[root@iZuf6hyvanq21thxm114s6Z ~]# /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-topics.sh --zookeeper 172.18.10.245:2181 --list
yuchao_topic
# 生产者 对主题 yuchao_topic 发送消息
/usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-console-producer.sh --broker-list 172.18.10.245:9092 --topic yuchao_topic
[root@iZuf6hyvanq21thxm114s6Z ~]# /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-console-producer.sh --broker-list 172.18.10.245:9092 --topic yuchao_topic
>hello long long no see
# 消费者 接收 主题 yuchao_topic 存储的生产者的消息 (备注:下面的这个方法已经废弃)
# /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-console-consumer.sh --zookeeper 172.18.10.245:2181 --topic yuchao_topic --from-beginning
# 消费者 接收 主题 yuchao_topic 存储的生产者的消息 (备注:上面的这个方法已经废弃)
/usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-console-consumer.sh --bootstrap-server 172.18.10.245:9092 --topic yuchao_topic --from-beginning
[root@iZuf6hyvanq21thxm114s6Z ~]# /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-console-consumer.sh --bootstrap-server 172.18.10.245:9092 --topic yuchao_topic --from-beginning
hello long long no see
#开相应端口号
#centos 7 开端口号
[root@iZuf6hyvanq21thxm114s6Z logs]# firewall-cmd --permanent --zone=public --add-port=2181/tcp
[root@iZuf6hyvanq21thxm114s6Z logs]# firewall-cmd --permanent --zone=public --add-port=9092/tcp
success
--zone=public:表示作用域为公共的;
--add-port=9092/tcp:添加tcp协议的端口9092;
--permanent:永久生效,如果没有此参数,则只能维持当前服务生命周期内,重新启动后失效;
#备注: 开端口号之后记得重新启动防火墙
#重启防火墙
[root@iZuf6hyvanq21thxm114s6Z logs]# firewall-cmd --reload
success
#查看已开放的所有端口
[root@iZuf6hyvanq21thxm114s6Z logs]# firewall-cmd --list-ports
# 阿里云ecs实例安全规则里 要新建安全规则 对 2181 , 9092 端口 对 0.0.0.0/0 任意ip 均可访问 的权限放开
#windows cmd 下 测试
telnet 47.103.137.122 2181
telnet 47.103.137.122 9092
# 修改配置文件
vi /usr/local/kafka/kafka_2.11-2.1.1/config/server.properties
# 关闭kafka 并重新启动
/usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-server-stop.sh
# 查看是否关闭成功
ps -ef | grep kafka
# 启动kafka
nohup /usr/local/kafka/kafka_2.11-2.1.1/bin/kafka-server-start.sh /usr/local/kafka/kafka_2.11-2.1.1/config/server.properties &
# 要好好整理一下 consumer.properties 与 server.config 与 nohup.out /etc/hosts , 阿里云规则配置 等日志输出文件
# 备注 上面的这些要好好的整理一下
# 包括 spring-kafka 的 pom 版本号 与 kafka-server
# 不打印 kafka 后端日志的方法 是 user-web 下的 log4j 配置文件
https://www.cnblogs.com/lly001/p/10547285.html
========
java客户端代码
========
+++++++++++++++++++++++++++++++++
+ spring 版本号 与 spring-kafka 版本号
+++++++++++++++++++++++++++++++++
备注: spring 用的pom 版本号是 4.3.11.RELEASE
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>1.3.4.RELEASE</version>
</dependency>
+++++++++++++++++++++++++++++++++
+spring-kafka-consumer.xml 配置开始
+++++++++++++++++++++++++++++++++
<!-- 不靠谱开始 -->
<bean id="testConsumerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<!-- broker地址清单: host:port -->
<entry key="bootstrap.servers" value="47.103.137.122:9092" />
<!-- 是否自动提交已拉取消息的offset。提交offset即视为该消息已经成功被消费,该组下的Consumer无 -->
<entry key="enable.auto.commit" value="true" />
<!-- 每次拉取消息的数量 -->
<entry key="max.poll.records" value="5" />
<!-- 生产者接口允许使用参数化类型,可以把Java对象作为键和值传broker,但是broker希望收到的消息的键和值都是字节数组,所以,必须提供将对象序列化成字节数组的序列化器。key.serializer必须设置为实现 -->
<entry key="key.deserializer" value="org.apache.kafka.common.serialization.StringDeserializer" />
<!-- value.serializer:同上 -->
<entry key="value.deserializer" value="org.apache.kafka.common.serialization.StringDeserializer" />
</map>
</constructor-arg>
</bean>
<!-- spring 工厂 -->
<bean id="testConsumerFactory" class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
<constructor-arg>
<ref bean="testConsumerProperties" />
</constructor-arg>
</bean>
<!-- 不靠谱结束 -->
<!--yuchao_topic kafka consumer start -->
<bean id="YuchaoTopicMsgPushListener" class="com.yu.chao.kafka.topic.consumer.YuchaoTopicMsgPushListener" />
<!-- 消费者容器配置信息 -->
<bean id="yuchao_TopicMsgPushProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
<constructor-arg value="yuchao_topic" />
<!-- 备注这个groupId 是在 kafka config 目录下 consumer.properties中配置的 -->
<property name="groupId" value="test-consumer-group" />
<property name="messageListener" ref="YuchaoTopicMsgPushListener" />
</bean>
<!-- 消费者并发消息监听容器,执行doStart()方法 -->
<bean id="msgPushListenerContainer" class="org.springframework.kafka.listener.ConcurrentMessageListenerContainer" init-method="doStart">
<constructor-arg ref="testConsumerFactory" />
<constructor-arg ref="yuchao_TopicMsgPushProperties" />
<property name="concurrency" value="2" />
</bean>
<!--yuchao_topic kafka consumer end -->
+++++++++++++++++++++++++++++++++
+spring-kafka-consumer.xml 配置结束
+++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++
+spring-kafka-producer.xml 配置开始
+++++++++++++++++++++++++++++++++
<!-- 不靠谱开始 -->
<bean id="testProducerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<!-- 博文地址 -->
<!-- broker地址清单: host:port -->
<entry key="bootstrap.servers" value="47.103.137.122:9092" />
<!-- acks:指定了必须要有多少个分区副本收到消息,生产者才会认为写入消息是成功的,这个参数对消息丢失的可能性有重大影响。 acks=0:生产者在写入消息之前不会等待任何来自服务器的响应,容易丢消息,但是吞吐量高。 acks=1:只要集群的首领节点收到消息,生产者会收到来自服务器的成功响应。如果消息无法到达首领节点(比如首领节点崩溃,新首领没有选举出来),生产者会收到一个错误响应,为了避免数据丢失,生产者会重发消息。不过,如果一个没有收到消息的节点成为新首领,消息还是会丢失。默认使用这个配置。 acks=all:只有当所有参与复制的节点都收到消息,生产者才会收到一个来自服务器的成功响应。延迟高 -->
<entry key="acks" value="0" />
<!-- 指定生产者可以重发消息的次数 -->
<entry key="retries" value="1" />
<!-- 发送消息请求的超时时间 -->
<entry key="request.timeout.ms" value="3000" />
<!-- 指定了在调用send()方法或者使用partitionsFor()方法获取元数据时生产者的阻塞时间。当生产者的发送缓冲区已满,或者没有可用的元数据时,这些方法就会阻塞。在阻塞时间达到max.block.ms时,生产者会抛出超时异常 -->
<entry key="max.block.ms" value="3000" />
<!-- 生产者接口允许使用参数化类型,可以把Java对象作为键和值传broker,但是broker希望收到的消息的键和值都是字节数组,所以,必须提供将对象序列化成字节数组的序列化器。key.serializer必须设置为实现 -->
<entry key="key.serializer" value="org.apache.kafka.common.serialization.StringSerializer" />
<!-- value.serializer:同上 -->
<entry key="value.serializer" value="org.apache.kafka.common.serialization.StringSerializer" />
</map>
</constructor-arg>
</bean>
<bean id="testProducerFactory" class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
<constructor-arg>
<ref bean="testProducerProperties" />
</constructor-arg>
</bean>
<!-- 不靠谱结束 -->
<!--kafka producer start -->
<!-- 主题 yuchao_topic 消息生产者 -->
<bean id="yuchaoTopicSender" class="org.springframework.kafka.core.KafkaTemplate">
<constructor-arg ref="testProducerFactory" />
<constructor-arg name="autoFlush" value="false" />
<property name="defaultTopic" value="yuchao_topic" />
</bean>
<!--kafka producer end -->
+++++++++++++++++++++++++++++++++
+spring-kafka-producer.xml 配置结束
+++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++
+主题 yuchao_topic 消息生产者
+++++++++++++++++++++++++++++++++
// Test controller that publishes a message to Kafka via the "yuchaoTopicSender"
// KafkaTemplate bean declared in spring-kafka-producer.xml.
// NOTE(review): as pasted, this snippet is missing the class's closing brace.
@Controller
@RequestMapping("/YuchaoTopicSenderActionTest")
public class YuchaoTopicSenderActionTest {
protected final Logger logger = LoggerFactory.getLogger(this.getClass());
// KafkaTemplate whose defaultTopic is "yuchao_topic" (see producer XML config)
@Resource(name = "yuchaoTopicSender")
private KafkaTemplate kafkaTemplate;
/**
 * Sends the "msg" request parameter to the template's default topic.
 * Returns a result map: {"success": true/false, plus the sent message or the error}.
 * http://localhost:9901/user-web/YuchaoTopicSenderActionTest/msgTo_yuchao_topic
 */
@ResponseBody
@RequestMapping(value="msgTo_yuchao_topic", method = {RequestMethod.GET,RequestMethod.POST})
public Map<String, Object> msgTo_yuchao_topic(
ModelMap model,
HttpSession session,
HttpServletRequest request,
HttpServletResponse response,
@RequestParam(value = "msg",required = false) String msg
){
logger.info(" 类 msgTo_yuchao_topic 方法 开始====");
// Response payload returned to the caller as JSON
Map<String,Object> res=new HashMap<String,Object>();
try {
// sendDefault publishes to the template's configured defaultTopic (yuchao_topic)
kafkaTemplate.sendDefault(msg);
res.put("发送消息", msg);
res.put("success", Boolean.TRUE);
logger.info(" 对主题 yuchao_topic 成功发送消息: [{}] " ,msg);
} catch (Exception e) {
// On failure, report the error message instead of throwing to the client
logger.info(" 类 msgToTopic 方法 异常信息如下: ",e);
res.put("success", Boolean.FALSE);
res.put("message",e.getMessage());
}
logger.info(" 类 msgTo_yuchao_topic 方法 结束====\n");
return res;
}
+++++++++++++++++++++++++++++++++
+主题 yuchao_topic 消息消费者
+++++++++++++++++++++++++++++++++
// Consumer for topic "yuchao_topic": wired as the messageListener of the
// ConcurrentMessageListenerContainer declared in spring-kafka-consumer.xml.
public class YuchaoTopicMsgPushListener implements MessageListener<String, String> {
private static final Logger logger = LoggerFactory.getLogger(YuchaoTopicMsgPushListener.class);
// Invoked by the listener container for each record polled from the topic.
public void onMessage(ConsumerRecord<String, String> record) {
try {
// Value is a plain String (StringDeserializer is configured for the consumer)
String msg=record.value();
logger.info("yuchao_topic主题接收到消息 msg: [{}] " ,msg);
} catch (Exception e) {
// Log and swallow so one bad record does not kill the container thread
logger.error("接收到消息:{} 出现异常 {}", record, e.getMessage(), e);
}
}
}
+++++++++++++++++++++++++++++++++
+日志如下
+++++++++++++++++++++++++++++++++
2019-06-15 18:24:37,502 [qtp1585787493-42] INFO [com.yu.chao.kafka.topic.action.YuchaoTopicSenderActionTest] - 类 msgTo_yuchao_topic 方法 开始====
2019-06-15 18:24:37,508 [qtp1585787493-42] INFO [org.apache.kafka.clients.producer.ProducerConfig] - ProducerConfig values:
acks = 1
batch.size = 16384
bootstrap.servers = [47.103.137.122:9092]
buffer.memory = 33554432
client.id =
compression.type = none
connections.max.idle.ms = 540000
enable.idempotence = false
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 2000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 2000
retries = 1
retry.backoff.ms = 100
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = null
value.serializer = class org.apache.kafka.common.serialization.StringSerializer
2019-06-15 18:24:37,529 [qtp1585787493-42] INFO [org.apache.kafka.common.utils.AppInfoParser] - Kafka version : 1.1.0
2019-06-15 18:24:37,542 [qtp1585787493-42] INFO [org.apache.kafka.common.utils.AppInfoParser] - Kafka commitId : fdcf75ea326b8e07
2019-06-15 18:24:37,570 [kafka-producer-network-thread | producer-1] INFO [org.apache.kafka.clients.Metadata] - Cluster ID: 94n777o0TIC4DiBWOxegQw
2019-06-15 18:24:37,583 [qtp1585787493-42] INFO [com.yu.chao.kafka.topic.action.YuchaoTopicSenderActionTest] - 对主题 yuchao_topic 成功发送消息: [对yuchao_topic主题消息发送时间:2019-06-15 18:24:37]
2019-06-15 18:24:37,583 [qtp1585787493-42] INFO [com.yu.chao.kafka.topic.action.YuchaoTopicSenderActionTest] - 类 msgTo_yuchao_topic 方法 结束====
2019-06-15 18:24:37,628 [msgPushListenerContainer-1-C-1] INFO [com.yu.chao.kafka.topic.consumer.YuchaoTopicMsgPushListener] - yuchao_topic主题接收到消息 msg: [对yuchao_topic主题消息发送时间:2019-06-15 18:24:37]