Operating HBase 2.1 from Scala

package scala

import java.io.IOException
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter
import org.apache.hadoop.hbase.filter.SubstringComparator
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase._
import java.util.UUID
import org.slf4j.LoggerFactory

object ScalaHbase {
  val LOG = LoggerFactory.getLogger(getClass)

  def getHbaseConf: Configuration = {
    val conf: Configuration = HBaseConfiguration.create
    //Copy hbase-site.xml into the project; note that the path is relative to the application
    conf.addResource("../hbase-site.xml")
    /*conf.set("hbase.zookeeper.property.clientPort", "2181")
    //conf.set("spark.executor.memory", "3000m")
    conf.set("hbase.zookeeper.quorum", "jzd.note1.com,jzd.note2.com,jzd.note3.com")
    conf.set("hbase.master", "jzd.note1.com:60000")
    //conf.set("hbase.rootdir", Constant.HBASE_ROOTDIR)*/
    conf
  }

  @throws(classOf[MasterNotRunningException])
  @throws(classOf[ZooKeeperConnectionException])
  @throws(classOf[IOException])
  def createTable(hbaseconn: Connection, tablename: String, columnFamily: Array[String]): Unit = {
    // Obtain an Admin handle for DDL operations
    val admin: Admin = hbaseconn.getAdmin
    val myTableName: TableName = TableName.valueOf(tablename)
    if (admin.tableExists(myTableName)) {
      LOG.info(tablename + " table already exists!")
    } else {
      val tableDesc: HTableDescriptor = new HTableDescriptor(myTableName)
      tableDesc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation")
      // Add one column family descriptor per requested family
      for (family <- columnFamily) {
        tableDesc.addFamily(new HColumnDescriptor(family))
      }
      admin.createTable(tableDesc)
      LOG.info(tablename + " create table success!")
    }
    admin.close()
  }
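
  // A minimal sketch (not in the original article): HTableDescriptor/HColumnDescriptor still work
  // in HBase 2.1 but are deprecated there; the same table creation can be expressed with the
  // 2.x builder API. The method name createTable2x is an illustrative assumption; the builder
  // classes are already covered by the org.apache.hadoop.hbase.client._ import above.
  def createTable2x(hbaseconn: Connection, tablename: String, columnFamily: Array[String]): Unit = {
    val admin: Admin = hbaseconn.getAdmin
    val myTableName = TableName.valueOf(tablename)
    try {
      if (!admin.tableExists(myTableName)) {
        val builder = TableDescriptorBuilder.newBuilder(myTableName)
          .setCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation")
        for (family <- columnFamily) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
        }
        admin.createTable(builder.build())
      }
    } finally {
      admin.close()
    }
  }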

  @throws(classOf[IOException])
  def addRow(table: Table, rowKey: String, columnFamily: String, key: String, value: String): Unit = {
    val rowPut: Put = new Put(Bytes.toBytes(rowKey))
    // Store an empty string when the value is null so the cell is still written
    val cellValue = if (value == null) "" else value
    rowPut.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(key), Bytes.toBytes(cellValue))
    table.put(rowPut)
  }

  @throws(classOf[IOException])
  def getRow(table: Table, rowKey: String): Result = {
    val get: Get = new Get(Bytes.toBytes(rowKey))
    val result: Result = table.get(get)
    for (rowKv <- result.rawCells()) {
      println("Family:"+new String(rowKv.getFamilyArray,rowKv.getFamilyOffset,rowKv.getFamilyLength,"UTF-8"))
      println("Qualifier:"+new String(rowKv.getQualifierArray,rowKv.getQualifierOffset,rowKv.getQualifierLength,"UTF-8"))
      println("Timestamp:"+rowKv.getTimestamp)
      println("rowKey:"+new String(rowKv.getRowArray,rowKv.getRowOffset,rowKv.getRowLength,"UTF-8"))
      println("Value:"+new String(rowKv.getValueArray,rowKv.getValueOffset,rowKv.getValueLength,"UTF-8"))
    }
    return result
  }

  def addDataBatch(table: Table, list: java.util.List[Put]) {
    try {
      table.put(list)
    }catch {
      case e: RetriesExhaustedWithDetailsException => {
        LOG.error(e.getMessage)
      }
      case e: IOException => {
        LOG.error(e.getMessage)
      }
    }
  }

  def queryAll(table: Table): ResultScanner = {
    try {
      val scan: Scan = new Scan()
      table.getScanner(scan)
    } catch {
      case e: IOException =>
        LOG.error(e.toString)
        null
    }
  }

  def queryBySingleColumn(table: Table, queryColumn: String, value: String, columns: Array[String]): ResultScanner = {
    if (columns == null || queryColumn == null || value == null) {
      return null
    }
    try {
      // Note: this helper uses the same name for the column family and the qualifier
      val filter: SingleColumnValueFilter = new SingleColumnValueFilter(
        Bytes.toBytes(queryColumn), Bytes.toBytes(queryColumn), CompareOp.EQUAL, new SubstringComparator(value))
      val scan: Scan = new Scan
      for (columnName <- columns) {
        scan.addColumn(Bytes.toBytes(columnName), Bytes.toBytes(columnName))
      }
      scan.setFilter(filter)
      return table.getScanner(scan)
    } catch {
      case e: Exception => {
        LOG.error(e.toString)
      }
    }
    return null
  }
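
  // A minimal usage sketch (not part of the original article): the ResultScanner returned by
  // queryAll / queryBySingleColumn must be iterated and then closed by the caller. The helper
  // name printAllRows and the use of JavaConverters are illustrative assumptions.
  def printAllRows(table: Table): Unit = {
    val scanner: ResultScanner = queryAll(table)
    if (scanner != null) {
      try {
        import scala.collection.JavaConverters._
        // ResultScanner is a java.lang.Iterable[Result], so it can be iterated via asScala
        for (result <- scanner.asScala) {
          println("rowKey:" + Bytes.toString(result.getRow))
        }
      } finally {
        scanner.close()
      }
    }
  }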

  def dropTable(hbaseconn: Connection, tableName: String): Unit = {
    // Admin is sufficient here; no cast to HBaseAdmin is needed
    val admin: Admin = hbaseconn.getAdmin
    try {
      admin.disableTable(TableName.valueOf(tableName))
      admin.deleteTable(TableName.valueOf(tableName))
    } catch {
      case e: MasterNotRunningException => LOG.error(e.toString)
      case e: ZooKeeperConnectionException => LOG.error(e.toString)
      case e: IOException => LOG.error(e.toString)
    } finally {
      admin.close()
    }
  }

  @throws(classOf[Exception])
  def main(args: Array[String]) {
    val conf: Configuration = ScalaHbase.getHbaseConf
    val conn = ConnectionFactory.createConnection(conf)
    //table name: test
    val table: Table = conn.getTable(TableName.valueOf("test"))
    try {
      //column families: fam1 and fam2
      val familyColumn: Array[String] = Array[String]("fam1", "fam2")
      ScalaHbase.createTable(conn,"test", familyColumn)

      val uuid: UUID = UUID.randomUUID
      val s_uuid: String = uuid.toString

      ScalaHbase.addRow(table, s_uuid, "fam1", "column1A", s_uuid+"_1A")
      ScalaHbase.addRow(table, s_uuid, "fam1", "column1B", s_uuid+"_1B")
      ScalaHbase.addRow(table, s_uuid, "fam2", "column2A", s_uuid+"_2A")
      ScalaHbase.addRow(table, s_uuid, "fam2", "column2B", s_uuid+"_2B")
      ScalaHbase.getRow(table, s_uuid)
      ScalaHbase.dropTable(conn,"test")
    } catch {
      case e: MasterNotRunningException =>
        println("MasterNotRunningException")
        e.printStackTrace()
      case e: ZooKeeperConnectionException =>
        println("ZooKeeperConnectionException")
        e.printStackTrace()
      case e: IOException =>
        println("IOException")
        e.printStackTrace()
      case e: Exception =>
        e.printStackTrace()
    } finally {
      if (null != table) {
        table.close()
      }
      // Close the connection as well to release client resources
      conn.close()
    }
  }
}

 

Using HBase with Kerberos authentication from Scala involves the following steps:

1. Configure Kerberos: make sure the HBase cluster already has Kerberos authentication enabled, and obtain the correct Kerberos principal and keytab file.

2. Add the HBase and Hadoop auth dependencies: in the Scala project's build file (e.g. build.sbt), add the required dependencies, for example:

```scala
libraryDependencies += "org.apache.hbase" % "hbase-client" % "<version>"
libraryDependencies += "org.apache.hadoop" % "hadoop-auth" % "<version>"
```

Replace `<version>` with the HBase and Hadoop versions you are actually using.

3. Create the HBase configuration: in the Scala code, build an HBase configuration object and set the properties required for Kerberos authentication, for example:

```scala
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.security.UserGroupInformation

val conf = HBaseConfiguration.create()
conf.set("hbase.zookeeper.quorum", "zkQuorum")
conf.set("hbase.zookeeper.property.clientPort", "zkPort")
conf.set("zookeeper.znode.parent", "/hbase")
conf.set("hadoop.security.authentication", "kerberos")

// Kerberos principal and path to the keytab file
val krbPrincipal = "your_kerberos_principal"
val krbKeytab = "path_to_your_keytab_file"
System.setProperty("java.security.krb5.conf", "/etc/krb5.conf")
UserGroupInformation.setConfiguration(conf)
UserGroupInformation.loginUserFromKeytab(krbPrincipal, krbKeytab)
```

Replace "zkQuorum" and "zkPort" with your ZooKeeper quorum and client port.

4. Use HBase: once the login succeeds, the Scala HBase client can be used for read and write operations with the appropriate HBase API (see the sketch below).

Note that this is only a simple example; your specific environment and requirements may need additional configuration and tuning.
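
As a rough illustration of step 4, here is a minimal sketch that opens a connection and reads one cell after the Kerberos login above succeeds; the table name "test", the row key, and the column names are placeholders of my own, not values mandated by the steps above:

```scala
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{ConnectionFactory, Get}
import org.apache.hadoop.hbase.util.Bytes

// Reuses the `conf` prepared in step 3; the keytab login must already have succeeded
val conn = ConnectionFactory.createConnection(conf)
val table = conn.getTable(TableName.valueOf("test"))   // placeholder table name
try {
  val result = table.get(new Get(Bytes.toBytes("some-row-key")))                 // placeholder row key
  val value  = result.getValue(Bytes.toBytes("fam1"), Bytes.toBytes("column1A")) // placeholder column
  if (value != null) println(Bytes.toString(value))
} finally {
  table.close()
  conn.close()
}
```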