librdkafka的编译及使用
- kafka 是一个基于发布-订阅的分布式消息系统(消息队列);
- Kafka 面向大数据,消息保存在topic中,而每个 topic 可以分为多个partition;
- kafka 的消息数据保存在磁盘,每个 partition 对应磁盘上的一个文件,消息写入就是文件追加,文件可以在集群内复制备份以防丢失;即使消息被消费,kafka 也不会立即删除该消息,可以通过配置使得过一段时间后自动删除以释放磁盘空间;
- kafka依赖分布式协调服务Zookeeper,适合离线/在线信息的消费;
1、安装java
1、源码安装:https://www.oracle.com/java/technologies/downloads/#java8
2、Linux自动安装
$ sudo apt install openjdk-8-jre-headless
2、librdkafka编译
https://github.com/confluentinc/librdkafka
https://github.com/confluentinc/librdkafka/releases/tag/v2.0.2
librdkafka
编译
$ tar -xzvf librdkafka-2.0.2.tar.gz
$ cd librdkafka-2.0.2
$ ./configure --prefix=/home/pz/tool/share/build/rdkafka
$ make
$ sudo make install
3、安装使用Kafka服务
https://kafka.apache.org/downloads
1、下载官方编译好的安装包kafka_2.11-1.0.0.tgz,在linux
上安装kafka服务端。
3.1 启动zookeeper
$ cd ~/tool/kafka_2.11-1.0.0/
$ bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
$ netstat -npl|grep 2181 #查看端口占用
3.2 启动kafka服务
$ bin/kafka-server-start.sh -daemon config/server.properties #修改server.properties中的参数即可修改kafka的相关配置
$ netstat -npl|grep 9092 #查看端口占用
#发送大文件时,需要设置参数如下
socket.request.max.bytes=104857600
添加message.max.bytes=10485880
若std::string broker_list = "192.168.4.8:9092"; 服务端要配置服务器监听端口,在server.properties中修改:
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://:9092
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=PLAINTEXT://192.168.4.8:9092
3.3 topic相关操作
#创建topic
$ bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic em-request
#同一个topic,同时建立三个partition
$ bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic em-response
#查看已有topic
$ bin/kafka-topics.sh --list --zookeeper localhost:2181
#显示某个topic详情
$ bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic em-response
#删除topic
$ bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic test
#如果 delete.topic.enable 未设置为true,则此操作不会产生任何影响
3.4 kafka服务关闭
$ cd ~/tool/kafka_2.11-1.0.0/bin
$ sh kafka-server-stop.sh
使用过程遇到的错误:
1、Kafka启动报错:/bin/kafka-run-class.sh: line 258: exec: java: not found
#因为Kafka的默认/usr/bin/java路径与我们实际的$JAVA_HOME/bin/java路径不一致,解决方法如下:
$ echo $JAVA_HOME #首先找到java所在路径
/software/java/jdk1.8.0_131   #echo输出的java安装路径
$ ln -s /software/java/jdk1.8.0_131/bin/java /usr/bin/java #然后建立一个软连接
#ln -s $JAVA_HOME/bin/java /usr/bin/java
2、error while loading shared libraries
#在linux下运行程序时,发现了error while loading shared libraries这种错误。./tests: error while loading shared libraries: xxx.so.0:cannot open shared object file: No such file or directory
#出现这类错误表示,系统不知道xxx.so在哪个目录下,因此需要将编译好的xxx.so放到/usr/local/lib目录下
3.5 部分代码
//初始化消费者(生产者同理)
void KafkaProcess::initConsumer()
{
char tmp[16];
char errstr[512];
rd_kafka_topic_conf_t *topic_conf;
rd_kafka_resp_err_t err;
rd_kafka_topic_partition_list_t *subscription;
int ret = 0;
this->consumer_conf = rd_kafka_conf_new();
ret = rd_kafka_conf_set(this->consumer_conf, "bootstrap.servers", this->brokers.c_str(), errstr, sizeof(errstr));
if (ret != RD_KAFKA_CONF_OK) {
printf("initConsumer conf bootstrap.servers:%s", errstr);
rd_kafka_conf_destroy(this->consumer_conf);
return;
}
ret = rd_kafka_conf_set(this->consumer_conf, "group.id", this->group.c_str(), errstr, sizeof(errstr));
if (ret != RD_KAFKA_CONF_OK) {
printf("initConsumer conf group.id:%s", errstr);
rd_kafka_conf_destroy(this->consumer_conf);
return;
}
if (rd_kafka_conf_set(this->consumer_conf, "security.protocol", "SASL_PLAINTEXT", errstr,
sizeof(errstr)) ||
rd_kafka_conf_set(this->consumer_conf, "sasl.mechanism", "PLAIN", errstr,
sizeof(errstr)) ||
rd_kafka_conf_set(this->consumer_conf, "sasl.username", "kafka", errstr,
sizeof(errstr)) ||
rd_kafka_conf_set(this->consumer_conf, "sasl.password", "123ABCdef*", errstr,
sizeof(errstr))) {
fprintf(stderr, "conf_set failed: %s\n", errstr);
printf("initConsumer conf sasl:%s",errstr);
return ;
}
this->consumer_rk = rd_kafka_new(RD_KAFKA_CONSUMER, this->consumer_conf, errstr, sizeof(errstr));
if(!this->consumer_rk){
printf("initConsumer: Failed to create new consumer:%s", errstr);
return;
}
rd_kafka_poll_set_consumer(this->consumer_rk);
subscription = rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(subscription, this->consumer_topic.c_str(), RD_KAFKA_PARTITION_UA);
if((err = rd_kafka_subscribe(this->consumer_rk, subscription))){
printf("initConsumer: Failed to start consuming topics:%s", rd_kafka_err2str(err));
rd_kafka_topic_partition_list_destroy(subscription);
rd_kafka_destroy(this->consumer_rk);
return;
}
rd_kafka_topic_partition_list_destroy(subscription);
printf("initConsumer: Consumer Init Success!");
}
//从kafka中消费数据
void KafkaProcess::consumerMsg()
{
while (true)
{
usleep(100000);
if(getMsgVecSize() >= this->msg_count){
printf("getMsgVecSize more than msg count!");
continue;
}
rd_kafka_message_t *rkmessage;
rkmessage = rd_kafka_consumer_poll(this->consumer_rk, 1000);
if (!rkmessage) {
continue;
}
if (rkmessage->err) {
printf("consumerMsg: %s\n", rd_kafka_message_errstr(rkmessage));
rd_kafka_message_destroy(rkmessage);
continue;
}
if(rkmessage){
string msg = (const char *)rkmessage->payload;
pushMsg(msg);
printf("push msg to vector, consumerMsg END \n");
rd_kafka_message_destroy(rkmessage);
}
}
}
//设置根据key值路由到不同的partition
void KafkaProcess::produceMsg(string msg, string keySn)
{
int ret = rd_kafka_produce(this->producer_rkt,
RD_KAFKA_PARTITION_UA,
RD_KAFKA_MSG_F_COPY,
(char*)msg.c_str(), msg.length(),
keySn.c_str(), keySn.length(), NULL);
if(ret == -1){
printf("produceMsg: Failed to produce to topic %s: %s\n", rd_kafka_topic_name(this->producer_rkt), rd_kafka_err2str(rd_kafka_last_error()));
}
rd_kafka_poll(this->producer_rk, 0);
}