Mac IDEA: HDFS to HBase

  • Use MapReduce to import data from HDFS into an HBase table.

  • The target HBase table and its column families must be created in advance (see the sketch after this list).

  • Do not add Hadoop's four configuration files to the project; they must not be imported.

  • conf.set("fs.defaultFS", "hdfs://hadoop01.blk5.cn:9000"); — this must be the address of the active NameNode, not the HA nameservice address.
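
The target table can be created from the hbase shell with create 'hdfs_hbase', 'base_info', 'job_info', or programmatically. Below is a minimal sketch of the latter (the class name CreateTable is just for illustration); it uses the HBase client API already declared in the pom, the same ZooKeeper quorum the job sets, and the table and column-family names the reducer writes to.

package cn.blk5;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "hadoop01.blk5.cn:2181,hadoop02.blk5.cn:2181,hadoop03.blk5.cn:2181");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("hdfs_hbase");
            if (!admin.tableExists(name)) {
                // two column families: base_info (name/sex/age) and job_info (dept)
                HTableDescriptor desc = new HTableDescriptor(name);
                desc.addFamily(new HColumnDescriptor("base_info"));
                desc.addFamily(new HColumnDescriptor("job_info"));
                admin.createTable(desc);
            }
        }
    }
}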

pom.xml

<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>cn.blk5</groupId>
  <artifactId>hbase001</artifactId>
  <version>1.0-SNAPSHOT</version>

  <name>hbase001</name>
  <!-- FIXME change it to the project's website -->
  <url>http://www.example.com</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.7</maven.compiler.source>
    <maven.compiler.target>1.7</maven.compiler.target>
  </properties>

  <dependencies>


    <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.7.7</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.7.7</version>
    </dependency>


    <!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
      <version>1.2.6</version>
    </dependency>


    <!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-mapreduce -->
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-mapreduce</artifactId>
      <version>2.1.4</version>
    </dependency>

    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <pluginManagement><!-- lock down plugins versions to avoid using Maven defaults (may be moved to parent pom) -->
      <plugins>
        <!-- clean lifecycle, see https://maven.apache.org/ref/current/maven-core/lifecycles.html#clean_Lifecycle -->
        <plugin>
          <artifactId>maven-clean-plugin</artifactId>
          <version>3.1.0</version>
        </plugin>
        <!-- default lifecycle, jar packaging: see https://maven.apache.org/ref/current/maven-core/default-bindings.html#Plugin_bindings_for_jar_packaging -->
        <plugin>
          <artifactId>maven-resources-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-compiler-plugin</artifactId>
          <version>3.8.0</version>
        </plugin>
        <plugin>
          <artifactId>maven-surefire-plugin</artifactId>
          <version>2.22.1</version>
        </plugin>
        <plugin>
          <artifactId>maven-jar-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-install-plugin</artifactId>
          <version>2.5.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-deploy-plugin</artifactId>
          <version>2.8.2</version>
        </plugin>
        <!-- site lifecycle, see https://maven.apache.org/ref/current/maven-core/lifecycles.html#site_Lifecycle -->
        <plugin>
          <artifactId>maven-site-plugin</artifactId>
          <version>3.7.1</version>
        </plugin>
        <plugin>
          <artifactId>maven-project-info-reports-plugin</artifactId>
          <version>3.0.0</version>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>
</project>

Java code

package cn.blk5;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;

public class MTH {

    // sample input line: 95002,刘晨,女,19,IS
    static class MTH_Mapper extends Mapper<LongWritable, Text, IntWritable, Text> {

        IntWritable k = new IntWritable();
        Text v = new Text();

        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, IntWritable, Text>.Context context) throws IOException, InterruptedException {
            String[] split = value.toString().split(",");
            if (split.length == 5) {
                k.set(Integer.parseInt(split[0]));
                v.set(split[1] + "\t" + split[2] + "\t" + split[3] + "\t" + split[4] + "\t");
                context.write(k, v);
            }
        }
    }

    static class MTH_Reduce extends TableReducer<IntWritable, Text, NullWritable> {

        @Override
        protected void reduce(IntWritable key, Iterable<Text> values, Reducer<IntWritable, Text, NullWritable, Mutation>.Context context) throws IOException, InterruptedException {
            for (Text v : values) {
                String[] datas = v.toString().split("\t");
                // row key = student id; columns go to the base_info and job_info families
                Put p = new Put((key.get() + "").getBytes());
                p.addColumn("base_info".getBytes(), "name".getBytes(), datas[0].getBytes());
                p.addColumn("base_info".getBytes(), "sex".getBytes(), datas[1].getBytes());
                p.addColumn("base_info".getBytes(), "age".getBytes(), datas[2].getBytes());
                p.addColumn("job_info".getBytes(), "dept".getBytes(), datas[3].getBytes());
                context.write(NullWritable.get(), p);
            }
        }
    }


    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        System.setProperty("HADOOP_USER_NAME", "hadoop");
        //System.setProperty("hadoop.home.dir", "/Users/liwei/Downloads/hadoop-2.7.7");
        Configuration conf = new Configuration();
        // active NameNode address (not the HA nameservice) plus the ZooKeeper quorum
        conf.set("fs.defaultFS", "hdfs://hadoop01.blk5.cn:9000");
        conf.set("hbase.zookeeper.quorum", "hadoop01.blk5.cn:2181,hadoop02.blk5.cn:2181,hadoop03.blk5.cn:2181");

        Job job = Job.getInstance(conf);

        job.setJarByClass(cn.blk5.MTH.class);
        job.setMapperClass(MTH_Mapper.class);

        /**
         * arg 1: table name
         * arg 2: reducer class
         * arg 3: job object
         */
//        TableMapReduceUtil.initTableReducerJob("hdfs_hbase", MTH_Reduce.class, job);
        // use the 8-argument overload; the last boolean (addDependencyJars) is false
        TableMapReduceUtil.initTableReducerJob("hdfs_hbase", MTH_Reduce.class, job, null, null, null, null, false);

        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Mutation.class);

        // input directory on HDFS holding the text file
        FileInputFormat.addInputPath(job, new Path("/hb"));

        // submit the job and wait for it to finish
        job.waitForCompletion(true);


//        try {
//            System.exit(job.waitForCompletion(true) ? 0 : 1);
//
//        } catch (InterruptedException e) {
//            e.printStackTrace();
//        } catch (ClassNotFoundException e) {
//            e.printStackTrace();
//        }


    }
}
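
After the job finishes, the import can be spot-checked by reading one row back. Below is a minimal sketch (the class name CheckImport is just for illustration) against the same table and quorum, assuming the row for student 95002 from the sample data was written.

package cn.blk5;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckImport {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "hadoop01.blk5.cn:2181,hadoop02.blk5.cn:2181,hadoop03.blk5.cn:2181");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("hdfs_hbase"))) {
            // the reducer uses the student id string as the row key
            Result r = table.get(new Get(Bytes.toBytes("95002")));
            System.out.println("name = " + Bytes.toString(r.getValue(Bytes.toBytes("base_info"), Bytes.toBytes("name"))));
            System.out.println("dept = " + Bytes.toString(r.getValue(Bytes.toBytes("job_info"), Bytes.toBytes("dept"))));
        }
    }
}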


Raw data: save the lines below as a txt file and upload it to HDFS (an upload sketch follows the data).

95002,刘晨,女,19,IS
95017,王风娟,女,18,IS
95018,王一,女,19,IS
95013,冯伟,男,21,CS
95014,王小丽,女,19,CS
95019,邢小丽,女,19,IS
95020,赵钱,男,21,IS
95003,王敏,女,22,MA
95004,张立,男,19,IS
95012,孙花,女,20,CS
95010,孔小涛,男,19,CS
95005,刘刚,男,18,MA
95006,孙庆,男,23,CS
95007,易思玲,女,19,MA
95008,李娜,女,18,CS
95021,周二,男,17,MA
95022,郑明,男,20,MA
95001,李勇,男,20,CS
95011,包小柏,男,18,MA
95009,梦圆圆,女,18,MA
95015,王君,男,18,MA
95016,钱国,男,21,MA
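
One way to upload the file is hdfs dfs -put student.txt /hb from the command line; the sketch below does the same through the Hadoop FileSystem API. The local file name student.txt and the class name UploadData are assumptions; the /hb input directory and the active-NameNode address match the job above.

package cn.blk5;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class UploadData {
    public static void main(String[] args) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "hadoop");
        Configuration conf = new Configuration();
        // active NameNode address, same as in the job (not the HA nameservice address)
        conf.set("fs.defaultFS", "hdfs://hadoop01.blk5.cn:9000");
        try (FileSystem fs = FileSystem.get(conf)) {
            fs.mkdirs(new Path("/hb"));
            // student.txt is a hypothetical local file holding the lines above
            fs.copyFromLocalFile(new Path("student.txt"), new Path("/hb/student.txt"));
        }
    }
}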
