[HBase API] ClusterStatus/ServerLoad Usage

This document uses the TestHBaseAPI class to show how to retrieve cluster status (ClusterStatus) and server load (ServerLoad) information through the HBase API. It covers key cluster-level data such as the version, average load, region count, and request count, as well as the detailed load of each RegionServer.

The TestHBaseAPI class

package com.huawei.bigdata.hbase.examples;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TestHBaseAPI {
  private final static Log LOG = LogFactory.getLog(TestHBaseAPI.class.getName());

  private TableName tableName = null;
  private Configuration conf = null;
  private Connection conn = null;
  private Admin admin = null;

  public TestHBaseAPI(Configuration conf) throws IOException {
    this.conf = conf;
    this.tableName = TableName.valueOf("austin06");
    this.conn = ConnectionFactory.createConnection(conf);
  }
  public void testAPI() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    testClusterStatus();
    // Release client resources when done
    admin.close();
    conn.close();
  }

  public void testClusterStatus() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    // Initialize admin
    admin = conn.getAdmin();
    // Fetch the cluster-wide status from the master
    ClusterStatus clusterStatus = admin.getClusterStatus();
    // Exercise the ClusterStatus getters
    System.out.println(clusterStatus.getVersion());
    System.out.println(clusterStatus.getAverageLoad());
    System.out.println(clusterStatus.getHBaseVersion());
    System.out.println(clusterStatus.getBackupMastersSize());
    System.out.println(clusterStatus.getClusterId());
    System.out.println(clusterStatus.getDeadServers());
    System.out.println(clusterStatus.getLastMajorCompactionTsForTable(tableName));
    System.out.println(clusterStatus.getRegionsCount());
    System.out.println(clusterStatus.getRequestsCount());
    System.out.println(clusterStatus.getServersSize());
    System.out.println(clusterStatus.getBalancerOn());
    System.out.println(clusterStatus.getClass());
    System.out.println(clusterStatus.getDeadServerNames());
    System.out.println(clusterStatus.getMasterCoprocessors());
    System.out.println(clusterStatus.getRegionsInTransition());
    // Collect the names of all live region servers
    List<ServerName> sn = new ArrayList<ServerName>();
    sn.addAll(clusterStatus.getServers());
    // Inspect the ServerLoad of each region server
    for (int i = 0; i < clusterStatus.getServersSize(); i++) {
      System.out.println("sn = " + sn.get(i));
      ServerLoad serverLoad = clusterStatus.getLoad(sn.get(i));
      // Exercise the ServerLoad getters
      System.out.println(serverLoad.getNumberOfRegions());
      System.out.println(serverLoad.getCurrentCompactedKVs());
      System.out.println(serverLoad.getMaxHeapMB());
      System.out.println(serverLoad.getNumberOfRequests());
      System.out.println(serverLoad.getMemstoreSizeInMB());
      System.out.println(serverLoad.getInfoServerPort());
      System.out.println(serverLoad.getReadRequestsCount());
      System.out.println(serverLoad.getRequestsPerSecond());
      System.out.println(serverLoad.getRootIndexSizeKB());
      System.out.println(serverLoad.getStorefileIndexSizeInMB());
      System.out.println(serverLoad.getStorefiles());
      System.out.println(serverLoad.getStorefileSizeInMB());
      System.out.println(serverLoad.getStores());
      System.out.println(serverLoad.getRegionServerCoprocessors());
    }
  }
}
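
Note that ClusterStatus and ServerLoad are deprecated in HBase 2.x in favor of ClusterMetrics and ServerMetrics. Below is a minimal sketch of the equivalent calls, assuming an HBase 2.x client; the class and method names come from the 2.x API, not from the example above.

public class ClusterMetricsSketch {
  // Prints cluster-wide and per-server metrics with the HBase 2.x API.
  public static void printMetrics(org.apache.hadoop.hbase.client.Connection conn)
      throws java.io.IOException {
    try (org.apache.hadoop.hbase.client.Admin admin = conn.getAdmin()) {
      // getClusterMetrics() replaces the deprecated getClusterStatus()
      org.apache.hadoop.hbase.ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println(metrics.getHBaseVersion());
      System.out.println(metrics.getAverageLoad());
      System.out.println(metrics.getRegionCount());
      System.out.println(metrics.getRequestCount());
      // getLiveServerMetrics() replaces getServers()/getLoad()
      java.util.Map<org.apache.hadoop.hbase.ServerName, org.apache.hadoop.hbase.ServerMetrics> servers =
          metrics.getLiveServerMetrics();
      for (java.util.Map.Entry<org.apache.hadoop.hbase.ServerName, org.apache.hadoop.hbase.ServerMetrics> e
          : servers.entrySet()) {
        System.out.println("sn = " + e.getKey());
        org.apache.hadoop.hbase.ServerMetrics sm = e.getValue();
        System.out.println(sm.getRequestCount());
        System.out.println(sm.getRequestCountPerSecond());
        System.out.println(sm.getRegionMetrics().size());
        System.out.println(sm.getInfoServerPort());
      }
    }
  }
}

Iterating the entry set of getLiveServerMetrics() also avoids the index-based lookup into a separate List<ServerName> used in the 1.x example.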


The main class

package com.huawei.bigdata.hbase.examples;

import java.io.File;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;

import com.huawei.hadoop.security.LoginUtil;

public class TestMain {
  private final static Log LOG = LogFactory.getLog(TestMain.class.getName());

  private static final String ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME = "Client";
  private static final String ZOOKEEPER_SERVER_PRINCIPAL_KEY = "zookeeper.server.principal";
  private static final String ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL = "zookeeper/hadoop.hadoop.com";

  private static Configuration conf = null;
  private static String krb5File = null;
  private static String userName = null;
  private static String userKeytabFile = null;

  public static void main(String[] args) throws Throwable {
    try {
      init();
      login();
    } catch (IOException e) {
      LOG.error("Failed to login because ", e);
      return;
    }
    // getDefaultConfiguration();

    // test hbase
    TestHBaseAPI testHBaseAPI = new TestHBaseAPI(conf);
    testHBaseAPI.testAPI();
    LOG.info("-----------finish HBase -------------------");
  }

  private static void login() throws IOException {
    if (User.isHBaseSecurityEnabled(conf)) {
      String userdir = System.getProperty("user.dir") + File.separator + "conf" + File.separator;
      userName = "austin";
      userKeytabFile = userdir + "user.keytab";
      krb5File = userdir + "krb5.conf";

      /*
       * If this client also connects to ZooKeeper, JAAS information for
       * ZooKeeper must be provided. You could set it directly:
       * System.setProperty("java.security.auth.login.config", confDirPath +
       * "jaas.conf");
       * but the helper below is more convenient. Note: if this process
       * connects to more than one ZooKeeper cluster, this approach may not
       * be suitable; you can contact us for more help.
       */
      LoginUtil.setJaasConf(ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME, userName, userKeytabFile);
      LoginUtil.setZookeeperServerPrincipal(ZOOKEEPER_SERVER_PRINCIPAL_KEY,
          ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL);
      LoginUtil.login(userName, userKeytabFile, krb5File, conf);
    }
  }

  private static void init() throws IOException {
    // Default load from conf directory
    conf = HBaseConfiguration.create();
    String userdir = System.getProperty("user.dir") + File.separator + "conf" + File.separator;
    conf.addResource(new Path(userdir + "core-site.xml"));
    conf.addResource(new Path(userdir + "hdfs-site.xml"));
    conf.addResource(new Path(userdir + "hbase-site.xml"));

  }

}
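
The main class above targets a Kerberos-secured cluster and relies on the Huawei LoginUtil helper plus the client XML files in the conf directory. For a quick test against a non-secure cluster, the login step can be skipped and the connection details set directly on the Configuration. A minimal sketch, assuming a non-Kerberos cluster; the quorum hosts are placeholders:

public class NonSecureInit {
  public static org.apache.hadoop.conf.Configuration create() {
    org.apache.hadoop.conf.Configuration conf =
        org.apache.hadoop.hbase.HBaseConfiguration.create();
    // Tell the client where ZooKeeper runs; replace with your own quorum hosts
    conf.set("hbase.zookeeper.quorum", "zk-host1,zk-host2,zk-host3");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    return conf;
  }
}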
