1. Purpose
Some of the data types arrive at a frequency of one record per second, so the data volume is very large. Parsing the complete JSON in Hive, especially on a single machine, is far too slow to meet the business requirements.
Flume's interceptors cannot transform the data well either, so the only practical approach is a Java program that consumes the data from Kafka topic A, parses out the fields, and writes the result to Kafka topic B.
2. Raw Data Format
The JSON format is fairly simple: an object containing a nested object.
{
    "deviceNo": "39",
    "sourceDeviceType": null,
    "sn": null,
    "model": null,
    "createTime": "2024-09-03 14:55:00",
    "data": {
        "cycle": 300,
        "volumeSum": 34,
        "speedAvg": 22.0,
        "volumeLeft": 12,
        "speedLeft": 22.0,
        "volumeStraight": 22,
        "speedStraight": 22.0,
        "volumeRight": 0,
        "speedRight": 0.0,
        "volumeTurn": null,
        "speedTurn": null
    }
}
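For reference, the program in the next section flattens each record into a single comma-separated line, in the field order of its String.format call; Jackson renders JSON null values as the literal string "null". The sample record above therefore comes out as:

39,null,null,null,2024-09-03 14:55:00,300,34,22.0,12,22.0,22,22.0,0,0.0,null,null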
3. Java Code
package com.kgc;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class KafkaKafkaTurnratio {

    // Kafka producer configuration
    private static Properties producerProps() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.0.70:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.ACKS_CONFIG, "-1");                 // wait for all in-sync replicas
        props.put(ProducerConfig.RETRIES_CONFIG, "3");
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        props.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        return props;
    }

    public static void main(String[] args) {
        // Kafka consumer configuration
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.0.70:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Offsets are committed manually via commitAsync() below; with auto-commit
        // disabled, AUTO_COMMIT_INTERVAL_MS has no effect.
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Each consumer job must define its own group ID
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "turnratio_group");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singleton("topic_internal_data_turn_ratio"));

        ObjectMapper mapper = new ObjectMapper();
        // Initialize the Kafka producer
        KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps());

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                try {
                    JsonNode rootNode = mapper.readTree(record.value());
                    System.out.println("Raw record: " + rootNode);

                    // Top-level fields; JSON null values come back as the string "null"
                    String device_no = rootNode.get("deviceNo").asText();
                    String source_device_type = rootNode.get("sourceDeviceType").asText();
                    String sn = rootNode.get("sn").asText();
                    String model = rootNode.get("model").asText();
                    String create_time = rootNode.get("createTime").asText();

                    // Fields nested inside the "data" object
                    JsonNode dataNode = rootNode.get("data");
                    String cycle = dataNode.get("cycle").asText();
                    String volume_sum = dataNode.get("volumeSum").asText();
                    String speed_avg = dataNode.get("speedAvg").asText();
                    String volume_left = dataNode.get("volumeLeft").asText();
                    String speed_left = dataNode.get("speedLeft").asText();
                    String volume_straight = dataNode.get("volumeStraight").asText();
                    String speed_straight = dataNode.get("speedStraight").asText();
                    String volume_right = dataNode.get("volumeRight").asText();
                    String speed_right = dataNode.get("speedRight").asText();
                    String volume_turn = dataNode.get("volumeTurn").asText();
                    String speed_turn = dataNode.get("speedTurn").asText();

                    // Flatten the record into a single comma-separated line
                    String outputLine = String.format("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s",
                            device_no, source_device_type, sn, model, create_time, cycle,
                            volume_sum, speed_avg, volume_left, speed_left, volume_straight,
                            speed_straight, volume_right, speed_right, volume_turn, speed_turn);

                    // Send the flattened line to the target topic, keeping the original key
                    ProducerRecord<String, String> producerRecord =
                            new ProducerRecord<>("topic_db_data_turn_ratio", record.key(), outputLine);
                    producer.send(producerRecord, (RecordMetadata metadata, Exception e) -> {
                        if (e != null) {
                            e.printStackTrace();
                        } else {
                            System.out.println("The offset of the record we just sent is: " + metadata.offset());
                        }
                    });
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
            // Manually commit the consumed offsets after each batch
            consumer.commitAsync();
        }
    }
}
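One caveat: the loop above runs forever and never closes either client, so a hard kill can drop the producer's buffered sends and the offsets queued by commitAsync(). Below is a minimal, hypothetical sketch of the standard graceful-shutdown pattern for the Kafka Java client; the class name KafkaKafkaTurnratioGraceful and the run(...) wrapper are illustrative and not part of the original program.

package com.kgc;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;

// Hypothetical sketch only: the same poll loop as above, wrapped so that
// Ctrl+C (or any JVM shutdown) triggers a clean exit.
public class KafkaKafkaTurnratioGraceful {

    public static void run(KafkaConsumer<String, String> consumer,
                           KafkaProducer<String, String> producer) {
        final Thread mainThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            consumer.wakeup();      // makes a blocked poll() throw WakeupException
            try {
                mainThread.join();  // wait for the loop below to finish its cleanup
            } catch (InterruptedException ignored) {
            }
        }));

        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    // parse and produce exactly as in KafkaKafkaTurnratio above
                }
                consumer.commitAsync();
            }
        } catch (WakeupException e) {
            // expected during shutdown; nothing to do
        } finally {
            consumer.commitSync();  // one final synchronous commit
            consumer.close();
            producer.flush();
            producer.close();
        }
    }
}

The hook calls consumer.wakeup(), which makes the blocked poll() throw WakeupException; the finally block then commits the last offsets synchronously and closes both clients.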
For the remaining steps, refer to the earlier post "259. Java — Collecting Kafka Data, Parsing It into Individual Records, and Writing It into Another Kafka Topic (Plain JSON)".
