Pulling Kafka Data by Partition and Offset
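By default a Kafka consumer joins a consumer group via subscribe() and lets the broker decide which partitions it reads. To re-read specific records, the consumer can instead be pinned to a partition with assign() and moved to an exact offset with seek(). The JUnit test below uses this pattern to replay GPS records from one partition of a topic; a stripped-down sketch of the same pattern follows the test.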

import org.apache.commons.beanutils.PropertyUtils; // assumed: PropertyUtils comes from commons-beanutils
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.Properties;

@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = {"classpath:spring.xml"})
public class ConsumerTest {


    @Autowired
    private RedisService redisService;
    private static final Logger logger = LoggerFactory.getLogger(ConsumerTest.class);
    private static final String TOPIC = "BuB-0200";
    private KafkaConsumer<String, String> consumer;

    @Test
    public void testConsumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node-07.bigdata:6667,node-08.bigdata:6667,node-09.bigdata:6667,node-10.bigdata:6667");
        props.put("group.id", "1");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<String, String>(props);

        try {
            // Pin the consumer to partition 9 of the topic
            TopicPartition topicPartition = new TopicPartition(TOPIC, 9);
            // Manually assign the partition (no consumer-group subscription or rebalancing)
            consumer.assign(Arrays.asList(topicPartition));
            // Seek to the exact offset to start reading from
            consumer.seek(topicPartition, 30518019L);

            while (true) {
                // Poll Kafka in a loop for the next batch of records (1 ms timeout)
                ConsumerRecords<String, String> records = consumer.poll(1);
                for (ConsumerRecord<String, String> record : records) {
                    String value = record.value();
                    Long offset = record.offset();
                    Integer partition = record.partition();
                    // Deserialize the message payload back into the project's HBase row wrapper
                    Req_0200_Hbase req = redisService.deserializeAsObject(value, Req_0200_Hbase.class);
                    HbaseRow hbaseRow = req.getGps();
                    final Gps gpsPo = new Gps();
                    // Copy each HBase cell into the matching Gps bean property by qualifier name
                    for (HbaseCell hbaseCell : hbaseRow.getHbaseCellList()) {
                        try {
                            PropertyUtils.setProperty(gpsPo, hbaseCell.getQualifier(), Bytes.toString(hbaseCell.getValue()));
                        } catch (Exception e) {
                            logger.error("Failed to set property, qualifier:{}, value:{}",
                                    hbaseCell.getQualifier(), Bytes.toString(hbaseCell.getValue()), e);
                        }
                    }
                    // Only inspect records for this particular SIM
                    if (gpsPo.getSim() == 17000003507L) {
                        String rowKey = hbaseRow.getRowKey();
                        String[] data = rowKey.split("_");
                        gpsPo.setVid(Long.valueOf(data[0]));
                        // Offset the rowKey's relative timestamp by 1483200000000 ms (2017-01-01 00:00 UTC+8)
                        gpsPo.setGpsTime(new Date(Long.valueOf(data[1]) + 1483200000000L));
                        logger.info("Received GPS data, partition:{}, offset:{}, sim:{}, rowKey:{}, gpsTime:{}",
                                partition, offset, gpsPo.getSim(), rowKey,
                                new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(gpsPo.getGpsTime()));
                    }
                }
            }
        } finally {
            consumer.close();
        }
    }
}
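For reference, here is a minimal, self-contained sketch of the same assign/seek pattern with the project-specific deserialization stripped out. The broker address, topic name, partition, offset, and group id are all placeholders, and it uses the poll(Duration) overload available since Kafka 2.0:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class SeekConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker list
        props.put("group.id", "seek-demo");               // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // assign() binds the consumer directly to a partition; no group rebalancing occurs
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic/partition
            consumer.assign(Collections.singletonList(tp));
            // seek() moves the read position to an exact offset within that partition
            consumer.seek(tp, 42L);                                // placeholder offset
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition=%d offset=%d value=%s%n",
                        record.partition(), record.offset(), record.value());
            }
        }
    }
}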
