HBase bulk load demo: generate HFiles with a MapReduce job (HFileOutputFormat2) for bulk loading into an HBase table.

数据免费下载地址

Mapper代码

package bulkload;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Maps one CSV line of a bank transfer record to an HBase {@link Put}.
 *
 * <p>Input: {@code TextInputFormat} records — the key is the byte offset of the
 * line ({@link LongWritable}, NOT {@code Long}: the original declaration of
 * {@code Mapper<Long, ...>} fails at runtime with a ClassCastException because
 * TextInputFormat always emits LongWritable keys).
 *
 * <p>Output: row key ({@link ImmutableBytesWritable}) and a {@link Put} with
 * 12 columns in family {@code C1}, suitable for HFileOutputFormat2 bulk load.
 */
public class BankRecordMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {

    /** Column family every field is written into. */
    private static final byte[] CF = "C1".getBytes(StandardCharsets.UTF_8);

    /** Column qualifiers, in the exact order the fields follow the rowkey in the CSV. */
    private static final String[] QUALIFIERS = {
            "code", "rec_account", "rec_bank_name", "rec_name", "pay_account",
            "pay_name", "pay_comments", "pay_channel", "pay_way", "status",
            "timestamp", "money"
    };

    /** rowkey + 12 data columns per line. */
    private static final int EXPECTED_FIELDS = QUALIFIERS.length + 1;

    // Reused across calls, as is conventional for Hadoop writables.
    private final ImmutableBytesWritable outKey = new ImmutableBytesWritable();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // value.toString() never returns null; only blank lines need skipping.
        String line = value.toString();
        if (line.trim().isEmpty()) {
            return;
        }

        String[] fields = line.split(",");
        // Guard against short/malformed rows: the original indexed fields[12]
        // unconditionally and would throw ArrayIndexOutOfBoundsException.
        if (fields.length < EXPECTED_FIELDS) {
            return;
        }

        // fields[0] is the row key; use an explicit charset (platform-default
        // getBytes() is non-portable between client and cluster JVMs).
        byte[] rowkey = fields[0].getBytes(StandardCharsets.UTF_8);
        outKey.set(rowkey);

        Put put = new Put(rowkey);
        // fields[i + 1] maps to QUALIFIERS[i].
        for (int i = 0; i < QUALIFIERS.length; i++) {
            put.addColumn(CF,
                    QUALIFIERS[i].getBytes(StandardCharsets.UTF_8),
                    fields[i + 1].getBytes(StandardCharsets.UTF_8));
        }

        context.write(outKey, put);
    }
}

Job 驱动代码（BulkloaderDriver，配置并提交 MapReduce 作业）

package bulkload;



import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

import java.io.IOException;

/**
 * Driver that configures and submits the bulk-load MapReduce job: reads
 * {@code data/bank_record.csv}, runs {@link BankRecordMapper}, and writes
 * region-partitioned HFiles to HDFS for a later {@code LoadIncrementalHFiles}
 * (completebulkload) step.
 */
public class BulkloaderDriver {

    /** Target table ({@code namespace:table}) the HFiles are generated for. */
    private static final TableName TABLE_NAME = TableName.valueOf("BANK_RECORD:RECORD");

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Build the HBase configuration FIRST and hand it to the job. The
        // original created the job with a default Configuration and only
        // built the HBase conf afterwards, so the ZooKeeper quorum was never
        // visible to the MapReduce tasks.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");

        Job job = Job.getInstance(conf, "bank-record-bulkload");

        // Input: plain text lines from the local/HDFS CSV file.
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.setInputPaths(job, new Path("data/bank_record.csv"));

        job.setMapperClass(BankRecordMapper.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        // Map OUTPUT value class — the original called setOutputValueClass,
        // which configures the job (reduce) output, not the mapper output.
        job.setMapOutputValueClass(Put.class);

        // Output: HFiles ready for bulk load.
        job.setOutputFormatClass(HFileOutputFormat2.class);
        HFileOutputFormat2.setOutputPath(job, new Path("hdfs://node1:8020/bulkLoad/output"));

        // try-with-resources — the original leaked both Connection and Table.
        // They are only needed while configuring the job, not while it runs.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TABLE_NAME)) {
            // Sets up the TotalOrderPartitioner and sort so each reducer emits
            // HFiles for exactly one region; it also sets the reducer count to
            // the region count (which is why the original setNumReduceTasks(0)
            // was dead code and has been removed).
            HFileOutputFormat2.configureIncrementalLoad(job, table,
                    conn.getRegionLocator(TABLE_NAME));
        }

        // Submit and block until completion; exit code reflects job status.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

小手追梦

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值