HBase Java API: batch data retrieval (scan) and insert (put) code


import com.alibaba.fastjson.JSONObject;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;
import java.util.List;

/**
 * Scan example: iterate over every row of an HBase table and
 * print the row key and the JSON value stored in each cell.
 */

public class test2 {
    public static void main(String[] args) {

        // HBase connection configuration
        try {
            Configuration HBASE_CONF;
            HBASE_CONF = HBaseConfiguration.create();
            HBASE_CONF.set("hbase.zookeeper.property.clientPort", "2181");
            HBASE_CONF.set("hbase.zookeeper.quorum", "192.168.xx.xx");
            HBASE_CONF.set("hbase.master", "192.168.xx.xx:60000");
            HBASE_CONF.set("zookeeper.znode.parent", "/hbase");
            HBASE_CONF.setInt("hbase.hconnection.threads.max", 100);
            HBASE_CONF.setInt("hbase.hconnection.threads.core", 50);
            HBASE_CONF.setLong("hbase.hconnection.threads.keepalivetime", 1000);

            Connection hbaseConnection = ConnectionFactory.createConnection(HBASE_CONF);
            Table table = hbaseConnection.getTable(TableName.valueOf("0_library_token"));
            System.out.println("Table Name: " + table.getName());
            Scan scan = new Scan();


            scan.addColumn(Bytes.toBytes("F"), Bytes.toBytes("F"));
            scan.setMaxVersions(Integer.MAX_VALUE); // without this, HBase returns only the latest version of each cell
            ResultScanner resultScanner = table.getScanner(scan);
            for (Result result : resultScanner) {
                List<Cell> cells = result.listCells(); // all cells belonging to this row
                byte[] row = result.getRow();
                String rowKey = Bytes.toString(row); // row key

                System.out.println("rowKey: " + rowKey);
                for (Cell cell : cells) {
                    // each cell value holds a JSON string
                    String jsonstr = Bytes.toString(CellUtil.cloneValue(cell));
                    JSONObject jsonObject = JSONObject.parseObject(jsonstr);
                    System.out.println("path: " + jsonObject.toString());
                }

                System.out.println("************************");
            }

            resultScanner.close();
            table.close();
            hbaseConnection.close();
        } catch (IOException e) {
            e.printStackTrace();
        }


    }
}
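
The scan above walks the whole table. To read only a slice, the same Scan object can carry a start/stop row and a caching hint. A minimal sketch, where the row-key bounds "rk0000"/"rk9999" and the caching value 500 are assumptions for illustration, not values from the original code:

            Scan rangeScan = new Scan();
            rangeScan.setStartRow(Bytes.toBytes("rk0000")); // inclusive start row (hypothetical key)
            rangeScan.setStopRow(Bytes.toBytes("rk9999"));  // exclusive stop row (hypothetical key)
            rangeScan.setCaching(500); // rows fetched per RPC; larger values cut round trips on big scans
            ResultScanner rangeScanner = table.getScanner(rangeScan);
            for (Result result : rangeScanner) {
                System.out.println(Bytes.toString(result.getRow()));
            }
            rangeScanner.close();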

Insert (put):

        Configuration HBASE_CONF;
        HBASE_CONF = HBaseConfiguration.create();
        HBASE_CONF.set("hbase.zookeeper.property.clientPort", "2181");
        HBASE_CONF.set("hbase.zookeeper.quorum", "192.168.xx.xx");
        HBASE_CONF.set("hbase.master", "192.168.xx.xx:60000");
        HBASE_CONF.set("zookeeper.znode.parent", "/hbase");
        HBASE_CONF.setInt("hbase.hconnection.threads.max", 100);
        HBASE_CONF.setInt("hbase.hconnection.threads.core", 50);
        HBASE_CONF.setLong("hbase.hconnection.threads.keepalivetime", 1000);

        try {
            Connection hbaseConnection = ConnectionFactory.createConnection(HBASE_CONF);
            Table table = hbaseConnection.getTable(TableName.valueOf("0_file_pv"));

            Put put = new Put(Bytes.toBytes("rk0001")); // row key
            put.addColumn(Bytes.toBytes("F"), Bytes.toBytes("F"), Bytes.toBytes("shandong"));

            // write the row
            table.put(put);

            table.close();
            hbaseConnection.close();
            System.out.println("结束***");
        } catch (IOException e) {
            e.printStackTrace();
        }
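
The put above writes a single row. Table.put also accepts a List<Put>, which sends the whole batch in one client call and is the usual way to bulk-insert. A minimal sketch against the same table, where the loop bound and row keys are made up for illustration (needs java.util.List and java.util.ArrayList):

            List<Put> puts = new ArrayList<>();
            for (int i = 0; i < 100; i++) {
                Put p = new Put(Bytes.toBytes(String.format("rk%04d", i))); // hypothetical keys rk0000..rk0099
                p.addColumn(Bytes.toBytes("F"), Bytes.toBytes("F"), Bytes.toBytes("value-" + i));
                puts.add(p);
            }
            table.put(puts); // one client call; the client groups the puts by region server internally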

Maven dependencies:

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <!--org.apache.hadoop.hbase.mapreduce-->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>${hbase.version}</version>
        </dependency>

Properties:

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <hadoop.version>2.6.5</hadoop.version>
        <hbase.version>1.2.5</hbase.version>
    </properties>

Note: HBase clients receive region server addresses from ZooKeeper as hostnames, so the entries in the client machine's hosts file must match the addresses used in the connection configuration.
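
For example, if the region servers register themselves in ZooKeeper under hostnames, the client needs a matching hosts entry (the hostname below is hypothetical):

    192.168.xx.xx   hbase-node1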
