对嵌入式来说,protobuf是真不好用,真心建议不用,便利性不如json一根毛
Kafka移植===============================================================
1)官方下载:GitHub - confluentinc/librdkafka: The Apache Kafka C/C++ library;
2)更改交叉编译链:
./configure : ./configure --host=arm-linux --cc=arm-linux-gnueabihf-gcc --cxx=arm-linux-gnueabihf-g++
3)然后 make并修复编译报错, 然后sudo make install;
4) 在/usr/local/下可以查看到生成库的动态与静态库文件、头文件。
Kafka执行文件编译=========================================================
5)书写生产者kafka_client.c与消费者代码,以c编写;
6)echo $CROSS_xxx查看交叉编译的环境变量是arm-linux-gnueabihf-,可以为后续做makefile做铺垫,也需要关联toolchain的path。
7)编译包含库文件与头文件: arm-linux-gnueabihf-gcc -o send_kafka kafka_client.c -I/usr/local/include/librdkafka -L/usr/local/lib/ -lrdkafka++ -L/usr/local/lib/ -lrdkafka -lpthread
8)用file命令查看send_kafka,可以看到其是dynamic的链接的so文件,可以添加-static更改为.a的链接库,但是目前会有报错,懒得修了,直接用动态库做。
9)将send_kafka以及库文件copy到ARM板子上,so文件也需要copy,如果没放在/usr/lib中则需要添加lib路径:export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH ;
并且需要软链接:
ln -sf librdkafka.so.1 librdkafka.so
ln -sf librdkafka++.so.1 librdkafka++.so
10)最后执行./send_kafka。
生产者与消费者代码参考详解C/C++如何发送与接收Kafka消息_C 语言_脚本之家
#include <rdkafka.h> // 包含C API头文件
#include <iostream>
#include <cstring>
#include <cerrno>
// Minimal librdkafka producer: enqueue one message and drain the out-queue.
int main() {
    const char *brokers = "xx.xx.xx.xx:7091";             // Kafka broker address
    const char *topic_name = "kafka_msg_topic_test";
    const char *payload = "Hello, Kafka from librdkafka!";
    size_t len = strlen(payload);

    // Create the configuration object. On success rd_kafka_new() takes
    // ownership of it, so it must only be destroyed on error paths.
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    if (!conf) {
        std::cerr << "Failed to create configuration object: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        return 1;
    }
    // Point the client at the broker list.
    if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, NULL, 0) != RD_KAFKA_CONF_OK) {
        std::cerr << "Failed to set bootstrap.servers: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_conf_destroy(conf);
        return 1;
    }
    // Create the producer instance; from here on, conf belongs to rk.
    rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, NULL, 0);
    if (!rk) {
        std::cerr << "Failed to create producer: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_conf_destroy(conf); // rd_kafka_new() does not free conf on failure
        return 1;
    }
    // Create a topic handle (optional, but recommended).
    rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic_name, NULL);
    if (!rkt) {
        std::cerr << "Failed to create topic handle: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_destroy(rk);
        return 1;
    }
    // Enqueue the message. RD_KAFKA_MSG_F_COPY makes librdkafka copy the
    // payload, so the local buffer may be reused immediately.
    int32_t partition = RD_KAFKA_PARTITION_UA; // let librdkafka pick the partition
    if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
                         const_cast<char *>(payload), len, NULL, 0, NULL) == -1) {
        // rd_kafka_produce() returns -1 on failure; the actual error code
        // comes from rd_kafka_last_error(), not from the return value.
        std::cerr << "Failed to produce to topic " << topic_name << ": "
                  << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
    } else {
        std::cout << "Produced " << len << " bytes to topic " << topic_name << std::endl;
    }
    // Drain the outbound queue: poll until every enqueued message has been
    // handed to the broker (or failed). Polling also serves any callbacks.
    while (rd_kafka_outq_len(rk) > 0) {
        rd_kafka_poll(rk, 100);
    }
    rd_kafka_topic_destroy(rkt); // destroy topic handle
    rd_kafka_destroy(rk);        // destroy producer (also frees the owned conf)
    return 0;
}
#include <rdkafka.h>
#include <iostream>
#include <cerrno>
#include <cstring>
#include <cstdlib>
// Error callback registered via rd_kafka_conf_set_error_cb(): logs the
// numeric error code and its human-readable reason to stderr.
void error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
    (void)rk;     // unused
    (void)opaque; // unused
    std::cerr << "Kafka error: " << err << ": " << reason << std::endl;
}
// Minimal librdkafka high-level consumer: subscribe to one topic and print
// each received message until a fatal consumer error occurs.
int main() {
    std::cerr << "start " << std::endl;
    const char *brokers = "xx.xx.xx.xx:7091";        // Kafka broker address
    const char *group_id = "kafka_msg_topic_test";   // consumer group id
    const char *topic_name = "kafka_msg_topic_test"; // topic to subscribe to

    // Create the configuration object. On success rd_kafka_new() takes
    // ownership of it, so it must only be destroyed on error paths.
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    if (!conf) {
        std::cerr << "Failed to create configuration object: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        return 1;
    }
    // Point the client at the broker list.
    if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, NULL, 0) != RD_KAFKA_CONF_OK) {
        std::cerr << "Failed to set bootstrap.servers: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_conf_destroy(conf);
        return 1;
    }
    // Set the consumer group id (required for the high-level consumer).
    if (rd_kafka_conf_set(conf, "group.id", group_id, NULL, 0) != RD_KAFKA_CONF_OK) {
        std::cerr << "Failed to set group.id: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_conf_destroy(conf);
        return 1;
    }
    // Install the error callback (optional).
    rd_kafka_conf_set_error_cb(conf, error_cb);
    // Create the consumer instance; from here on, conf belongs to rk.
    rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0);
    if (!rk) {
        std::cerr << "Failed to create consumer: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_conf_destroy(conf); // fix: conf was leaked here before
        return 1;
    }
    // NOTE(review): librdkafka recommends rd_kafka_poll_set_consumer(rk) here
    // so error/stats callbacks are served by consumer_poll — confirm and add.

    // Build a topic-partition list holding the single subscription.
    rd_kafka_topic_partition_list_t *topics = rd_kafka_topic_partition_list_new(1);
    if (!topics) {
        std::cerr << "Failed to create topic partition list: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_destroy(rk);
        return 1;
    }
    // RD_KAFKA_PARTITION_UA: partitions are assigned by the group rebalance.
    if (!rd_kafka_topic_partition_list_add(topics, topic_name, RD_KAFKA_PARTITION_UA)) {
        std::cerr << "Failed to add topic to partition list: " << rd_kafka_err2str(rd_kafka_last_error()) << std::endl;
        rd_kafka_topic_partition_list_destroy(topics);
        rd_kafka_destroy(rk);
        return 1;
    }
    // Subscribe to the topic set.
    rd_kafka_resp_err_t err = rd_kafka_subscribe(rk, topics);
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        std::cerr << "Failed to subscribe to topic: " << rd_kafka_err2str(err) << std::endl;
        rd_kafka_topic_partition_list_destroy(topics);
        rd_kafka_destroy(rk);
        return 1;
    }
    // The list is copied by subscribe(); it is no longer needed.
    rd_kafka_topic_partition_list_destroy(topics);

    // Poll loop: every returned message object must be destroyed, including
    // error/EOF event messages (they were leaked in the original code).
    while (true) {
        rd_kafka_message_t *rkmessage = rd_kafka_consumer_poll(rk, 1000); // wait up to 1 s
        if (rkmessage == NULL) {
            continue; // timeout, no message
        }
        if (rkmessage->err) {
            if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                // Reached the current end of a partition — not fatal.
                std::cout << "End of partition event" << std::endl;
                rd_kafka_message_destroy(rkmessage); // fix: was leaked
            } else {
                // Fatal consumer error: report and stop consuming.
                std::cerr << "Kafka consumer error: " << rd_kafka_message_errstr(rkmessage) << std::endl;
                rd_kafka_message_destroy(rkmessage); // fix: was leaked
                break;
            }
        } else {
            // key/payload may be NULL; streaming a NULL char* into an
            // ostream is undefined behavior, so substitute placeholders.
            // NOTE(review): key/payload are raw bytes, not guaranteed to be
            // NUL-terminated — printing them as C strings assumes the
            // producer sends text; confirm against the producer.
            const char *key = rkmessage->key ? static_cast<const char *>(rkmessage->key) : "(null)";
            const char *val = rkmessage->payload ? static_cast<const char *>(rkmessage->payload) : "(null)";
            std::cout << "Received message at offset " << rkmessage->offset
                      << " from partition " << rkmessage->partition
                      << " with key \"" << key << "\" and payload size " << rkmessage->len
                      << " value :" << val
                      << std::endl;
            rd_kafka_message_destroy(rkmessage);
        }
    }
    // NOTE(review): a graceful shutdown should call rd_kafka_consumer_close(rk)
    // before destroy to commit offsets and leave the group — confirm and add.
    rd_kafka_destroy(rk);
    return 0;
}
Protobuf移植=============================================================
11)必须先装protobuf(c++的),新版本自己拉吧。
GitHub - protocolbuffers/protobuf: Protocol Buffers - Google's data interchange format
12)新版编译比较麻烦,又是蛋疼点,别的作者基本没有写过新版本编译或者一堆编译不过的,看readme自己撸。照着来保证过,readme就这么写的。
a)git submodule update --init --recursive
b)cmake --build . --parallel 10(如构建文件尚未生成,需先执行 cmake . 完成配置——以官方readme为准)
c)sudo make install
编完大概就这样,so都出来了,也没有搞什么release,搞了又编不过。
Protobuf-c移植=============================================================
12-b)linux系统安装protobuf-c工具(只管C):
git继续拉GitHub - protobuf-c/protobuf-c: Protocol Buffers implementation in C
13)还是以前版本编译舒服,搞什么cmake。
./autogen.sh
./configure --host=arm-linux CC=arm-linux-gnueabihf-gcc
14)找不到protobuf,继续蛋疼
find找下protobuf的pkgconfig,我的在这
改PKG_CONFIG_PATH=/home/user/protobuf/protobuf/install/release/lib/pkgconfig
export PKG_CONFIG_PATH
15)编译总算过了(添加protobuf lib文件位置export LD_LIBRARY_PATH=/usr/local/lib:XXXX)。
make
make install
16)根据protobuf定义,预设.proto文件
// Schema for reporting tracked-object trajectories (compiled with protoc-c).
syntax = "proto3";
message objLocus {
string deviceSn = 1; // device serial number
string time = 2; // timestamp as a string -- format not specified here; confirm with producer
uint32 count = 3; // presumably the number of entries in locusList -- verify against the writer
// One tracked object entry.
message locus {
string id = 1; // object identifier
uint32 objType = 2; // object type code -- value meanings defined by the application
uint32 lane = 3; // lane number
}
repeated locus locusList = 4; // list of tracked objects
}
17)编译proto.c与.h文件,空格很坑要注意
./protoc-c --c_out=. hurys.proto
18)生成的hurys.pb-c.c hurys.pb-c.h,就可以用在代码中作为类似于json.c的可调用模式编写了。用起来很麻烦,也没有json好用,实在嫌json慢,老老实实按应用层字节协议传吧,再次不建议嵌入式linux用这破玩意,google开发这个估计都是用在java上的。