Spark: writing to HBase

Maven dependencies (pom.xml):

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.hadoop</groupId>
                    <artifactId>hadoop-client</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.apache.hadoop</groupId>
                    <artifactId>hadoop-hdfs</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>org.cloudera.logredactor</groupId>
                    <artifactId>logredactor</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-it -->
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-it</artifactId>
            <version>2.1.0-cdh6.3.3</version>
            <scope>compile</scope>
        </dependency>
        <!-- Spark support for Hive -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>${spark.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>com.fasterxml.jackson.core</groupId>
                    <artifactId>jackson-databind</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.cloudera.logredactor</groupId>
                    <artifactId>logredactor</artifactId>
                </exclusion>
            </exclusions>
            <!--<scope>provided</scope>-->
        </dependency>
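
The ${spark.version}, ${hadoop.version}, and ${hbase.version} placeholders need to be defined in the pom's <properties> block. A minimal sketch, assuming the CDH 6.3.3 artifact versions implied by the hbase-it dependency above (match them to whatever your cluster ships):

        <properties>
            <!-- Versions assumed from the cdh6.3.3 tag above; adjust to your cluster. -->
            <spark.version>2.4.0-cdh6.3.3</spark.version>
            <hadoop.version>3.0.0-cdh6.3.3</hadoop.version>
            <hbase.version>2.1.0-cdh6.3.3</hbase.version>
        </properties>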

The Java version of the Spark job:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import scala.Tuple3;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;

public class HiveToHbase {
    public static void main(String[] args) throws IOException {
        JavaSparkContext jsc = new JavaSparkContext("local[*]", "11", new SparkConf());
        // HBase connection settings: the ZooKeeper quorum and the znode HBase registers under.
        jsc.hadoopConfiguration().set("hbase.zookeeper.quorum", "xxx:2181,xxx:2181,xxx:2181");
        jsc.hadoopConfiguration().set("zookeeper.znode.parent", "/hbase");
        // Target table for TableOutputFormat.
        jsc.hadoopConfiguration().set(TableOutputFormat.OUTPUT_TABLE, "jl:test_load");
        // TableOutputFormat writes to HBase, not HDFS, but the output committer still
        // expects an output directory, so point it at a throwaway path.
        jsc.hadoopConfiguration().set("mapreduce.output.fileoutputformat.outputdir", "/tmp/cc/" + UUID.randomUUID());
        JobConf jobConf = new JobConf(jsc.hadoopConfiguration());
        jobConf.setOutputKeyClass(ImmutableBytesWritable.class);
        jobConf.setOutputValueClass(Put.class);
        jobConf.setOutputFormat(TableOutputFormat.class);
        List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
        JavaRDD<Integer> javaRDD = jsc.parallelize(data);
        javaRDD.map(n -> new Tuple3<String, String, String>("key" + n, "code" + n, "level" + n))
                .mapToPair(t -> {
                    // Row key and all cell values must be byte arrays.
                    ImmutableBytesWritable rowKey = new ImmutableBytesWritable(Bytes.toBytes(t._1()));
                    Put put = new Put(Bytes.toBytes(t._1()));
                    put.addColumn(Bytes.toBytes("0"), Bytes.toBytes("ou_code"), Bytes.toBytes(t._2()));
                    put.addColumn(Bytes.toBytes("0"), Bytes.toBytes("ou_level"), Bytes.toBytes(t._3()));
                    return new Tuple2<>(rowKey, put);
                }).saveAsHadoopDataset(jobConf);

        jsc.stop();
    }
}

The final result:
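
To check the write, you can scan the table from the HBase shell; with the data above, rows key1 through key5 should each carry a 0:ou_code and a 0:ou_level cell (assuming the jl:test_load table already exists):

scan 'jl:test_load'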

A quick note on the output: the two rows whose rowkey and values appear garbled came from an earlier run that read int and double columns from Hive and wrote them to HBase as-is. The cause is that the int 1 and the string "1" do not share the same byte representation.
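
A minimal sketch of that difference, reusing HBase's Bytes utility from the job above: Bytes.toBytes(int) produces the integer's four raw bytes, while Bytes.toBytes(String) produces UTF-8 text bytes, so the HBase shell renders the former as escaped garbage. Converting numeric Hive columns to String before building the Put keeps the stored values readable.

import org.apache.hadoop.hbase.util.Bytes;

public class BytesDemo {
    public static void main(String[] args) {
        byte[] asInt = Bytes.toBytes(1);      // int 1 -> 4 bytes: \x00\x00\x00\x01
        byte[] asString = Bytes.toBytes("1"); // "1"   -> 1 byte:  0x31
        System.out.println(Bytes.toStringBinary(asInt));    // prints \x00\x00\x00\x01
        System.out.println(Bytes.toStringBinary(asString)); // prints 1
        // Fix for the Hive int/double case: stringify before writing, e.g.
        // put.addColumn(family, qualifier, Bytes.toBytes(String.valueOf(intValue)));
    }
}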
