kafka topic 发送消息到 hbase

目录

一、配置kafka信息

二、配置hbase信息,连接hbase数据库

需要先在hbase创建对应的命名空间(namespace)和table

Put对象,按照不同需求,从文件中截取对应字段

三、完整代码


一、配置kafka信息

        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.180:9092");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");  // 手动提交
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");   // 自动提交时,提交时间
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "userfriend_group2");
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // 配置Kafka信息。消费消息
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Collections.singleton("user_friends"));

二、配置hbase信息,连接hbase数据库

需要先在hbase创建对应的命名空间(namespace)和table

Put对象,按照不同需求,从文件中截取对应字段

     // 配置Kafka信息。消费消息
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Collections.singleton("user_friends"));

        // 配置hbase信息,连接hbase数据库
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.91.180:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.91.180");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");
        Connection connection = null;
        try {
            connection = ConnectionFactory.createConnection(conf);
            Table userFriendTable = connection.getTable(TableName.valueOf("events_db:user_friend"));

            while (true) {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(100));
                ArrayList<Put> datas = new ArrayList<>();
                for (ConsumerRecord<String, String> record : poll) {
                    System.out.println(record.value()); // userid,friendid
                    String[] split = record.value().split(",");
                    Put put = new Put(Bytes.toBytes((split[0] + split[1]).hashCode()));
                    put.addColumn("uf".getBytes(), "userid".getBytes(), split[0].getBytes());
                    put.addColumn("uf".getBytes(), "friendid".getBytes(), split[1].getBytes());
                    datas.add(put);
                }
                num = num + datas.size();
                System.out.println("----------num:" + num);
                if (datas.size() != 0)
                    userFriendTable.put(datas);

                Thread.sleep(10);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

三、完整代码

package nj.zb.kb21.kafkatohb;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

/**
 * Consumes CSV records of the form {@code "userid,friendid"} from the Kafka
 * topic {@code user_friends} and writes them as rows into the HBase table
 * {@code events_db:user_friend} (column family {@code uf}).
 *
 * <p>The HBase namespace and table must be created beforehand. Offsets are
 * committed manually only after a batch has been stored in HBase, giving
 * at-least-once delivery semantics.
 */
public class UserFriendToHB {
    // Running total of records written to HBase across poll iterations.
    static int num = 0;

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.180:9092");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        // Auto-commit is disabled: offsets are committed manually via
        // commitSync() after each successful HBase write (the original code
        // never committed at all, so every restart reprocessed the topic).
        // AUTO_COMMIT_INTERVAL_MS was removed — it is ignored when
        // auto-commit is off.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "userfriend_group2");
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // HBase connection settings (ZooKeeper quorum + client port).
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.91.180:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.91.180");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");

        // try-with-resources guarantees the consumer, the HBase connection
        // and the table are closed even when an exception escapes the loop
        // (the original leaked all three).
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            consumer.subscribe(Collections.singleton("user_friends"));
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table userFriendTable =
                         connection.getTable(TableName.valueOf("events_db:user_friend"))) {
                while (true) {
                    // poll() already blocks up to 100 ms, so no extra sleep
                    // is needed between iterations.
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                    List<Put> datas = new ArrayList<>();
                    for (ConsumerRecord<String, String> record : records) {
                        // Expected value format: "userid,friendid".
                        String[] split = record.value().split(",");
                        if (split.length < 2) {
                            // Skip malformed records instead of letting an
                            // ArrayIndexOutOfBoundsException kill the loop.
                            System.err.println("skipping malformed record: " + record.value());
                            continue;
                        }
                        // NOTE(review): hashCode() of the concatenated ids is
                        // kept for backward compatibility with existing rows,
                        // but it can collide — consider the raw
                        // "userid_friendid" string as the row key instead.
                        Put put = new Put(Bytes.toBytes((split[0] + split[1]).hashCode()));
                        // Explicit UTF-8: getBytes() without a charset uses
                        // the platform default, which is not portable.
                        put.addColumn(Bytes.toBytes("uf"), Bytes.toBytes("userid"),
                                split[0].getBytes(StandardCharsets.UTF_8));
                        put.addColumn(Bytes.toBytes("uf"), Bytes.toBytes("friendid"),
                                split[1].getBytes(StandardCharsets.UTF_8));
                        datas.add(put);
                    }
                    if (!datas.isEmpty()) {
                        userFriendTable.put(datas);
                        num += datas.size();
                        System.out.println("----------num:" + num);
                        // Commit offsets only after the batch has been stored,
                        // so a crash before this point replays the batch
                        // (at-least-once) rather than losing it.
                        consumer.commitSync();
                    }
                }
            }
        } catch (IOException e) {
            // HBase connection/write failure — report and let main exit;
            // the try-with-resources blocks still close everything.
            e.printStackTrace();
        }
    }
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值