Spark jobs reading and writing HBase on a Kerberos-enabled cluster

In one of my projects the cluster had Kerberos enabled and a Spark job needed to read and write HBase. At first I only did the following to obtain a connection:

val configuration = HBaseConfiguration.create
...(set the Kerberos-related configuration here)
UserGroupInformation.setConfiguration(configuration)
UserGroupInformation.loginUserFromKeytab(prop.KERBEROSE_PRINCIPAL, keyTab)
val connection: Connection = ConnectionFactory.createConnection(configuration)

Obtaining the HBase connection this way works when the job runs only locally (on the driver), but my job had to read and write HBase inside the Spark executors, and there this approach throws an error.

It took two or three days of searching and debugging before I finally solved the problem:

The key is to obtain the connection with

val connection: Connection = ConnectionFactory.createConnection(config, loginedUser)

Here is the full code:


import java.io.File

import ***.DmpProperties
import ***.LogUtil
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.security.User
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.client._
import org.apache.spark.SparkFiles
import scala.collection.JavaConversions._
import org.apache.hadoop.security.UserGroupInformation

/**
 * HBase helper for a Kerberos-enabled cluster: builds the configuration,
 * performs the keytab login on the executor, and creates authenticated connections.
 */
object HbaseUtilNew{
  private val log = LogFactory.getLog(this.getClass)

  def hbaseConfig(prop: DmpProperties): Configuration ={
    try{
      val configuration = HBaseConfiguration.create
      configuration.set(HConstants.ZOOKEEPER_QUORUM, prop.HBASE_ZOOKEEPER_QUORUM)
      configuration.set(HConstants.ZOOKEEPER_CLIENT_PORT, prop.HBASE_ZOOKEEPER_PROPERTY_CLIENT_PORT)
      configuration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, prop.HBASE_ZOOKEEPER_ZNODE_PARENT)

      if(prop.IS_KERBEROSE){
        configuration.set("hadoop.security.authentication", "kerberos")
        configuration.set("hbase.security.authentication", "kerberos")
        configuration.set("hbase.security.authorization", "true")
        configuration.set("hbase.master.kerberos.principal", prop.KERBEROSE_MASTER_PRINCIPAL)
        configuration.set("hbase.regionserver.kerberos.principal", prop.KERBEROSE_REGIONSERVER_PRINCIPAL)

        getHBaseAuthentication(configuration, prop)
//        val userGroupInformation = UserGroupInformation.loginUserFromKeytabAndReturnUGI(prop.KERBEROSE_PRINCIPAL, keyTab)
//        UserGroupInformation.setLoginUser(userGroupInformation)
      }
      configuration
    }catch {
      case ex:Exception => ex.printStackTrace()
        throw new IllegalArgumentException("init hbase error", ex)
    }
  }

  def getHBaseAuthentication(configuration: Configuration,prop:DmpProperties): Unit ={
    if(prop.IS_KERBEROSE){
      LogUtil.info(log,"getHBaseAuthentication...")
      UserGroupInformation.setConfiguration(configuration)
      val keyTab = SparkFiles.get(new File(prop.KERBEROSE_KEYTAB).getName)

      LogUtil.info(log,"excutor keytab: "+keyTab)
      UserGroupInformation.loginUserFromKeytab(prop.KERBEROSE_PRINCIPAL, keyTab)
      LogUtil.info(log,"getHBaseAuthentication over")
    }
  }



  def getConnection(prop: DmpProperties): Connection ={
    try{
      LogUtil.info(log,"create Hbase Connection...")
      val config: Configuration = hbaseConfig(prop)
      val loginedUser: User = getAuthenticatedUser(config,prop)
      val connection: Connection = ConnectionFactory.createConnection(config, loginedUser)
      LogUtil.info(log,s"hbase connection created, loginedUser: ${loginedUser.getName}")

      connection
    }catch {
      case ex:Exception => ex.printStackTrace()
        throw new IllegalArgumentException("===== get hbase connect failed", ex)
    }
  }



  def getAuthenticatedUser(configuration: Configuration, prop:DmpProperties): User ={
    getHBaseAuthentication(configuration, prop)
    try{
      val loginedUser: User = User.create(UserGroupInformation.getLoginUser())
      LogUtil.info(log,s"====getAuthenticatedUser: ${loginedUser.getName}")
      loginedUser
    }catch {
      case ex:Exception => ex.printStackTrace()
        throw new IllegalArgumentException("===== getAuthenticatedUser failed", ex)
    }
  }

}
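If a job only needs simple point reads or writes from inside an executor (without the MapReduce input/output formats shown below), HbaseUtilNew.getConnection can be used directly in foreachPartition. The following is a minimal sketch rather than code from the project: it assumes an RDD[String] of row keys named rdd, and the table, family and qualifier names are placeholders.

// Sketch only: log in and connect on the executor itself, once per partition.
rdd.foreachPartition { records =>
  val prop = DmpProperties.getInstance()             // assumed resolvable on the executor, as in the Java classes below
  val connection = HbaseUtilNew.getConnection(prop)  // keytab login via SparkFiles, then an authenticated connection
  val table = connection.getTable(TableName.valueOf("some_table")) // placeholder table name
  try {
    records.foreach { rowkey =>
      val put = new Put(Bytes.toBytes(rowkey))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("c1"), Bytes.toBytes(rowkey)) // placeholder family/qualifier
      table.put(put)
    }
  } finally {
    table.close()
    connection.close()
  }
}

For scanning or bulk-writing whole tables through newAPIHadoopRDD / saveAsNewAPIHadoopDataset, use the custom input/output formats below instead.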



package org.apache.hadoop.hbase.mapreduce;


import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import ***.DmpProperties;
import ***.HbaseUtilNew;
import ***.LogUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.util.StringUtils;


/**
 * Convert HBase tabular data into a format that is consumable by Map/Reduce.
 * This class extends TableInputFormatBase and overrides how the connection is created, passing in an authenticated user so that the HBase table can be read.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MyTableInputFormat extends TableInputFormatBase
        implements Configurable {

    @SuppressWarnings("hiding")
    private static final Log LOG = LogFactory.getLog(MyTableInputFormat.class);

    /** Job parameter that specifies the input table. */
    public static final String INPUT_TABLE = "hbase.mapreduce.inputtable";
    /**
     * If specified, use start keys of this table to split.
     * This is useful when you are preparing data for bulkload.
     */
    private static final String SPLIT_TABLE = "hbase.mapreduce.splittable";
    /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified.
     * {@link TableMapReduceUtil#convertScanToString(Scan)} for more details.
     */
    public static final String SCAN = "hbase.mapreduce.scan";
    /** Scan start row */
    public static final String SCAN_ROW_START = "hbase.mapreduce.scan.row.start";
    /** Scan stop row */
    public static final String SCAN_ROW_STOP = "hbase.mapreduce.scan.row.stop";
    /** Column Family to Scan */
    public static final String SCAN_COLUMN_FAMILY = "hbase.mapreduce.scan.column.family";
    /** Space delimited list of columns and column families to scan. */
    public static final String SCAN_COLUMNS = "hbase.mapreduce.scan.columns";
    /** The timestamp used to filter columns with a specific timestamp. */
    public static final String SCAN_TIMESTAMP = "hbase.mapreduce.scan.timestamp";
    /** The starting timestamp used to filter columns with a specific range of versions. */
    public static final String SCAN_TIMERANGE_START = "hbase.mapreduce.scan.timerange.start";
    /** The ending timestamp used to filter columns with a specific range of versions. */
    public static final String SCAN_TIMERANGE_END = "hbase.mapreduce.scan.timerange.end";
    /** The maximum number of version to return. */
    public static final String SCAN_MAXVERSIONS = "hbase.mapreduce.scan.maxversions";
    /** Set to false to disable server-side caching of blocks for this scan. */
    public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
    /** The number of rows for caching that will be passed to scanners. */
    public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";
    /** Set the maximum number of values to return for each call to next(). */
    public static final String SCAN_BATCHSIZE = "hbase.mapreduce.scan.batchsize";
    /** Specify if we have to shuffle the map tasks. */
    public static final String SHUFFLE_MAPS = "hbase.mapreduce.inputtable.shufflemaps";

    /** The configuration. */
    private Configuration conf = null;

    /** The kerberos authenticated user*/
    private User user;

    /**
     * Returns the current configuration.
     *
     * @return The current configuration.
     * @see Configurable#getConf()
     */
    @Override
    public Configuration getConf() {
        return conf;
    }

    /**
     * Sets the configuration. This is used to set the details for the table to
     * be scanned.
     *
     * @param configuration  The configuration to set.
     * @see Configurable#setConf(
     *   Configuration)
     */
    @Override
    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
            justification="Intentional")
    public void setConf(Configuration configuration) {
        this.conf = configuration;
        //=========get kerberos authentication before create hbase connection==========
        //Note: authenticate again (on the executor) before obtaining the user
        user = HbaseUtilNew.getAuthenticatedUser(conf, DmpProperties.getInstance());
        LogUtil.info(LOG, "user:" + user.getName());
        //=============================================================================

        Scan scan = null;

        if (conf.get(SCAN) != null) {
            try {
                scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN));
            } catch (IOException e) {
                LOG.error("An error occurred.", e);
            }
        } else {
            try {
                scan = new Scan();

                if (conf.get(SCAN_ROW_START) != null) {
                    scan.setStartRow(Bytes.toBytes(conf.get(SCAN_ROW_START)));
                }

                if (conf.get(SCAN_ROW_STOP) != null) {
                    scan.setStopRow(Bytes.toBytes(conf.get(SCAN_ROW_STOP)));
                }

                if (conf.get(SCAN_COLUMNS) != null) {
                    addColumns(scan, conf.get(SCAN_COLUMNS));
                }

                if (conf.get(SCAN_COLUMN_FAMILY) != null) {
                    scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY)));
                }

                if (conf.get(SCAN_TIMESTAMP) != null) {
                    scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
                }

                if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) {
                    scan.setTimeRange(
                            Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
                            Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
                }

                if (conf.get(SCAN_MAXVERSIONS) != null) {
                    scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
                }

                if (conf.get(SCAN_CACHEDROWS) != null) {
                    scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
                }

                if (conf.get(SCAN_BATCHSIZE) != null) {
                    scan.setBatch(Integer.parseInt(conf.get(SCAN_BATCHSIZE)));
                }

                // false by default, full table scans generate too much BC churn
                scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
            } catch (Exception e) {
                LOG.error(StringUtils.stringifyException(e));
            }
        }

        setScan(scan);
    }

    @Override
    protected void initialize(JobContext context) throws IOException {
        // Do we have to worry about mis-matches between the Configuration from setConf and the one
        // in this context?
        TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE));
        try {
            //====================add authenticated user ===================
            initializeTable(ConnectionFactory.createConnection(new Configuration(conf),user), tableName);
        } catch (Exception e) {
            LOG.error(StringUtils.stringifyException(e));
        }
    }

    /**
     * Parses a combined family and qualifier and adds either both or just the
     * family in case there is no qualifier. This assumes the older colon
     * divided notation, e.g. "family:qualifier".
     *
     * @param scan The Scan to update.
     * @param familyAndQualifier family and qualifier
     * @throws IllegalArgumentException When familyAndQualifier is invalid.
     */
    private static void addColumn(Scan scan, byte[] familyAndQualifier) {
        byte [][] fq = KeyValue.parseColumn(familyAndQualifier);
        if (fq.length == 1) {
            scan.addFamily(fq[0]);
        } else if (fq.length == 2) {
            scan.addColumn(fq[0], fq[1]);
        } else {
            throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
        }
    }

    /**
     * Adds an array of columns specified using old format, family:qualifier.
     * <p>
     * Overrides previous calls to {@link Scan#addColumn(byte[], byte[])}for any families in the
     * input.
     *
     * @param scan The Scan to update.
     * @param columns array of columns, formatted as <code>family:qualifier</code>
     * @see Scan#addColumn(byte[], byte[])
     */
    public static void addColumns(Scan scan, byte [][] columns) {
        for (byte[] column : columns) {
            addColumn(scan, column);
        }
    }

    /**
     * Calculates the splits that will serve as input for the map tasks. The
     * number of splits matches the number of regions in a table. Splits are shuffled if
     * required.
     * @param context  The current job context.
     * @return The list of input splits.
     * @throws IOException When creating the list of splits fails.
     * @see org.apache.hadoop.mapreduce.InputFormat#getSplits(
     *   JobContext)
     */
    @Override
    public List<InputSplit> getSplits(JobContext context) throws IOException {
        List<InputSplit> splits = super.getSplits(context);
        if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase())) {
            Collections.shuffle(splits);
        }
        return splits;
    }

    /**
     * Convenience method to parse a string representation of an array of column specifiers.
     *
     * @param scan The Scan to update.
     * @param columns  The columns to parse.
     */
    private static void addColumns(Scan scan, String columns) {
        String[] cols = columns.split(" ");
        for (String col : cols) {
            addColumn(scan, Bytes.toBytes(col));
        }
    }

    @Override
    protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
        if (conf.get(SPLIT_TABLE) != null) {
            TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE));
            //====================add authenticated user ===================
            try (Connection conn = ConnectionFactory.createConnection(getConf(),user)) {
                try (RegionLocator rl = conn.getRegionLocator(splitTableName)) {
                    return rl.getStartEndKeys();
                }
            }
        }

        return super.getStartEndKeys();
    }

    /**
     * Sets split table in map-reduce job.
     */
    public static void configureSplitTable(Job job, TableName tableName) {
        job.getConfiguration().set(SPLIT_TABLE, tableName.getNameAsString());
    }
}
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;

import ***.DmpProperties;
import ***.HbaseUtilNew;
import ***.LogUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored
 * while the output value <u>must</u> be either a {@link Put} or a
 * {@link Delete} instance.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MyTableOutputFormat<KEY> extends OutputFormat<KEY, Mutation>
        implements Configurable {

    private static final Log LOG = LogFactory.getLog(MyTableOutputFormat.class);

    /** Job parameter that specifies the output table. */
    public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";

    /**
     * Optional job parameter to specify a peer cluster.
     * Used specifying remote cluster when copying between hbase clusters (the
     * source is picked up from <code>hbase-site.xml</code>).
     * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, Class, String, String, String)
     */
    public static final String QUORUM_ADDRESS = "hbase.mapred.output.quorum";

    /** Optional job parameter to specify peer cluster's ZK client port */
    public static final String QUORUM_PORT = "hbase.mapred.output.quorum.port";

    /** Optional specification of the rs class name of the peer cluster */
    public static final String
            REGION_SERVER_CLASS = "hbase.mapred.output.rs.class";
    /** Optional specification of the rs impl name of the peer cluster */
    public static final String
            REGION_SERVER_IMPL = "hbase.mapred.output.rs.impl";

    /** The configuration. */
    private Configuration conf = null;

    /** The kerberos authenticated user*/
    private User user;

    /**
     * Writes the reducer output to an HBase table.
     */
    protected class TableRecordWriter
            extends RecordWriter<KEY, Mutation> {

        private Connection connection;
        private BufferedMutator mutator;

        /**
         * @throws IOException
         *
         */
        public TableRecordWriter() throws IOException {
            String tableName = conf.get(OUTPUT_TABLE);
            //====================add authenticated user ===================
            this.connection = ConnectionFactory.createConnection(conf,user);
            this.mutator = connection.getBufferedMutator(TableName.valueOf(tableName));
            LOG.info("Created table instance for "  + tableName);
        }
        /**
         * Closes the writer, in this case flush table commits.
         *
         * @param context  The context.
         * @throws IOException When closing the writer fails.
         * @see RecordWriter#close(TaskAttemptContext)
         */
        @Override
        public void close(TaskAttemptContext context)
                throws IOException {
            mutator.close();
            connection.close();
        }

        /**
         * Writes a key/value pair into the table.
         *
         * @param key  The key.
         * @param value  The value.
         * @throws IOException When writing fails.
         * @see RecordWriter#write(Object, Object)
         */
        @Override
        public void write(KEY key, Mutation value)
                throws IOException {
            if (!(value instanceof Put) && !(value instanceof Delete)) {
                throw new IOException("Pass a Delete or a Put");
            }
            mutator.mutate(value);
        }
    }

    /**
     * Creates a new record writer.
     *
     * @param context  The current task context.
     * @return The newly created writer instance.
     * @throws IOException When creating the writer fails.
     * @throws InterruptedException When the jobs is cancelled.
     */
    @Override
    public RecordWriter<KEY, Mutation> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        return new TableRecordWriter();
    }

    /**
     * Checks if the output target exists.
     *
     * @param context  The current context.
     * @throws IOException When the check fails.
     * @throws InterruptedException When the job is aborted.
     * @see OutputFormat#checkOutputSpecs(JobContext)
     */
    @Override
    public void checkOutputSpecs(JobContext context) throws IOException,
            InterruptedException {
        // TODO Check if the table exists?

    }

    /**
     * Returns the output committer.
     *
     * @param context  The current context.
     * @return The committer.
     * @throws IOException When creating the committer fails.
     * @throws InterruptedException When the job is aborted.
     * @see OutputFormat#getOutputCommitter(TaskAttemptContext)
     */
    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        return new TableOutputCommitter();
    }

    @Override
    public Configuration getConf() {
        return conf;
    }

    @Override
    public void setConf(Configuration otherConf) {
        this.conf = HBaseConfiguration.create(otherConf);

        //=========get kerberos authentication before create hbase connection==========
        user = HbaseUtilNew.getAuthenticatedUser(conf, DmpProperties.getInstance());
        LogUtil.info(LOG, "user:" + user.getName());

        String tableName = this.conf.get(OUTPUT_TABLE);
        if(tableName == null || tableName.length() <= 0) {
            throw new IllegalArgumentException("Must specify table name");
        }

        String address = this.conf.get(QUORUM_ADDRESS);
        int zkClientPort = this.conf.getInt(QUORUM_PORT, 0);
        String serverClass = this.conf.get(REGION_SERVER_CLASS);
        String serverImpl = this.conf.get(REGION_SERVER_IMPL);

        try {
            if (address != null) {
                ZKUtil.applyClusterKeyToConf(this.conf, address);
            }
            if (serverClass != null) {
                this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl);
            }
            if (zkClientPort != 0) {
                this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
            }
        } catch(IOException e) {
            LOG.error(e);
            throw new RuntimeException(e);
        }
    }
}
package hbase

import java.io.IOException

import ***.DmpProperties
import ***.util._
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, Put}
import org.apache.hadoop.hbase.mapreduce.{MyTableOutputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.client.{Get, Result}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}


object HbaseWriteTest {
  private val log = LogFactory.getLog(this.getClass)

  def main(args: Array[String]) {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hive").setLevel(Level.WARN)

    println("------------------ main start ------------------")
    val dmpProperties = DmpProperties.getInstance()

    val conf = new SparkConf()
      .setAppName("HbaseWriteTes")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    if(!CommonUtil.isLinux){
      conf.setMaster("local[1]")
    }

    val sc = new SparkContext(conf)
    if(dmpProperties.IS_KERBEROSE){
      val driver_keytab = dmpProperties.KERBEROSE_KEYTAB
      sc.addFile(driver_keytab)
    }

    val job = {
      try {
        val hbaseConf = HbaseUtilNew.hbaseConfig(dmpProperties)
        hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, dmpProperties.HBASE_USER_DEVICEID_TABLE)
        lazy val job = Job.getInstance(hbaseConf)
        job.setOutputKeyClass(classOf[ImmutableBytesWritable])
        job.setOutputValueClass(classOf[Put])
        job.setOutputFormatClass(classOf[MyTableOutputFormat[ImmutableBytesWritable]])
        job
      } catch {
        case ex: IOException => throw new Exception("init hbase error", ex)
      }
    }

    val aa=args(0)
    val bb=args(1)

//    val seq = List("r1|cf1|v1","r2|cf1|v2","r3|cf1|v3","r4|cf1|v4")
    val seq = List(aa,bb)
    val rdd: RDD[String] = sc.parallelize(seq)
    val putRdd = rdd.map(record => {

      LogUtil.info(log,s"record: ${record}")

      val split: Array[String] = record.split("\\|")
      val rowkey = split(0)
      val cf = split(1)
      val value = split(2)

      LogUtil.info(log,s"rowkey: ${rowkey}, cf: ${cf}, value: ${value},")
      val put: Put = new Put(Bytes.toBytes(rowkey))
      put.addColumn(Bytes.toBytes(cf), Bytes.toBytes("c1"), Bytes.toBytes(value))

      (new ImmutableBytesWritable, put)
    })
    putRdd.saveAsNewAPIHadoopDataset(job.getConfiguration)

    sc.stop()

  }

}
package hbase

import ***.DmpProperties
import ***.util._
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.hbase.CellUtil
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{MyTableInputFormat, TableInputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

object HbaseReadTest {
  private val log = LogFactory.getLog(this.getClass)

  def main(args: Array[String]) {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hive").setLevel(Level.WARN)

    println("------------------ main start ------------------")
    val dmpProperties = DmpProperties.getInstance()

    val conf = new SparkConf()
      .setAppName("HbaseReadTes")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    if(!CommonUtil.isLinux){
      conf.setMaster("local[1]")
    }

    val sc = new SparkContext(conf)
    if(dmpProperties.IS_KERBEROSE){
      val driver_keytab = dmpProperties.KERBEROSE_KEYTAB
      sc.addFile(driver_keytab)
    }

    val hbaseConf = HbaseUtilNew.hbaseConfig(dmpProperties)
    hbaseConf.set(TableInputFormat.INPUT_TABLE, dmpProperties.HBASE_USER_DEVICEID_TABLE)
    val hbaseRdd = sc.newAPIHadoopRDD(hbaseConf, classOf[MyTableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    import scala.collection.JavaConversions._

    val keyValueRdd = hbaseRdd.map(_._2).map{ result =>
      val hbaseValue = result.listCells().map(cell => {
        val rowkey = Bytes.toString(CellUtil.cloneRow(cell))
        val column = Bytes.toString(CellUtil.cloneQualifier(cell))
        val value = Bytes.toString(CellUtil.cloneValue(cell))
        (rowkey, column, value)
      }).toSet
      hbaseValue
    }.flatMap(x => x)

    val collect: Array[(String, String, String)] = keyValueRdd.collect()
    println("============== size: "+collect.size)
    println("============== collect(0): "+collect(0))


    sc.stop()

  }

}
In your Spark jobs, replace TableInputFormat and TableOutputFormat with MyTableInputFormat and MyTableOutputFormat, as the read and write examples above do.
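One detail that is easy to miss: getHBaseAuthentication resolves the keytab through SparkFiles.get, so the driver must ship the keytab before any executor tries to log in. A minimal recap of that pairing, using the same lines that already appear in the code above:

// driver side: distribute the keytab so each executor gets a local copy
sc.addFile(dmpProperties.KERBEROSE_KEYTAB)

// executor side (inside HbaseUtilNew.getHBaseAuthentication): resolve that copy by file name
val keyTab = SparkFiles.get(new File(prop.KERBEROSE_KEYTAB).getName)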

 
