// TestHBaseAPI class (file: TestHBaseAPI.java)
package com.huawei.bigdata.hbase.examples;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math3.stat.clustering.Cluster;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus;
import org.apache.hadoop.io.VersionedWritable;
/**
 * Exercises the read-only HBase cluster-status APIs ({@code ClusterStatus}
 * and {@code ServerLoad}), printing each value to stdout.
 */
public class TestHBaseAPI {
    // Bug fix: the original logged against HBaseSample.class (copy-paste error);
    // log against this class instead.
    private final static Log LOG = LogFactory.getLog(TestHBaseAPI.class.getName());

    private TableName tableName = null;
    private Configuration conf = null;
    private Connection conn = null;
    private Admin admin = null;

    /**
     * Creates the harness and opens an HBase connection.
     *
     * @param conf cluster configuration used to create the connection
     * @throws IOException if the connection cannot be established
     */
    public TestHBaseAPI(Configuration conf) throws IOException {
        this.conf = conf;
        // Hard-coded sample table name; only used for getLastMajorCompactionTsForTable.
        this.tableName = TableName.valueOf("austin06");
        this.conn = ConnectionFactory.createConnection(conf);
    }

    /**
     * Entry point for the API exercise; currently runs only the
     * ClusterStatus checks.
     */
    public void testAPI() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
        TestClusterStatus();
    }

    /**
     * Fetches the cluster status via {@code Admin} and prints every
     * cluster-wide value, then the per-region-server {@code ServerLoad} values.
     *
     * @throws IOException if the master cannot be reached
     */
    public void TestClusterStatus() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
        admin = conn.getAdmin();
        try {
            org.apache.hadoop.hbase.ClusterStatus clusterStatus = admin.getClusterStatus();

            // Cluster-wide values.
            System.out.println(clusterStatus.getVersion());
            System.out.println(clusterStatus.getAverageLoad());
            System.out.println(clusterStatus.getHBaseVersion());
            System.out.println(clusterStatus.getBackupMastersSize());
            System.out.println(clusterStatus.getClusterId());
            System.out.println(clusterStatus.getDeadServers());
            System.out.println(clusterStatus.getLastMajorCompactionTsForTable(tableName));
            System.out.println(clusterStatus.getRegionsCount());
            System.out.println(clusterStatus.getRequestsCount());
            System.out.println(clusterStatus.getServersSize());
            System.out.println(clusterStatus.getBalancerOn());
            System.out.println(clusterStatus.getClass());
            System.out.println(clusterStatus.getDeadServerNames());
            System.out.println(clusterStatus.getMasterCoprocessors());
            System.out.println(clusterStatus.getRegionsInTransition());

            // Per-region-server values. Iterate the live-server set directly
            // instead of copying it into an index-addressed list.
            for (ServerName serverName : clusterStatus.getServers()) {
                System.out.println("sn = " + serverName);
                ServerLoad serverLoad = clusterStatus.getLoad(serverName);
                System.out.println(serverLoad.getNumberOfRegions());
                System.out.println(serverLoad.getCurrentCompactedKVs());
                System.out.println(serverLoad.getMaxHeapMB());
                System.out.println(serverLoad.getNumberOfRequests());
                System.out.println(serverLoad.getMemstoreSizeInMB());
                System.out.println(serverLoad.getInfoServerPort());
                System.out.println(serverLoad.getReadRequestsCount());
                System.out.println(serverLoad.getRequestsPerSecond());
                System.out.println(serverLoad.getRootIndexSizeKB());
                System.out.println(serverLoad.getStorefileIndexSizeInMB());
                System.out.println(serverLoad.getStorefiles());
                System.out.println(serverLoad.getStorefileSizeInMB());
                System.out.println(serverLoad.getStores());
                System.out.println(serverLoad.getRegionServerCoprocessors());
            }
        } finally {
            // Leak fix: the original never released the Admin handle.
            // The Connection itself stays open for reuse; see close().
            admin.close();
        }
    }

    /**
     * Releases the underlying HBase connection. Call when this instance is
     * no longer needed (the original leaked the connection).
     *
     * @throws IOException if closing the connection fails
     */
    public void close() throws IOException {
        if (conn != null) {
            conn.close();
        }
    }
}
// Main class (file: TestMain.java)
package com.huawei.bigdata.hbase.examples;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;
import com.huawei.hadoop.security.LoginUtil;
public class TestMain {
private final static Log LOG = LogFactory.getLog(TestMain.class.getName());
private static final String ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME = "Client";
private static final String ZOOKEEPER_SERVER_PRINCIPAL_KEY = "zookeeper.server.principal";
private static final String ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL = "zookeeper/hadoop.hadoop.com";
private static Configuration conf = null;
private static String krb5File = null;
private static String userName = null;
private static String userKeytabFile = null;
public static void main(String[] args) throws Throwable {
try {
init();
login();
} catch (IOException e) {
LOG.error("Failed to login because ", e);
return;
}
// getDefaultConfiguration();
// test hbase
TestHBaseAPI testHBaseAPI = new TestHBaseAPI(conf);
testHBaseAPI.testAPI();
}
LOG.info("-----------finish HBase -------------------");
private static void login() throws IOException {
if (User.isHBaseSecurityEnabled(conf)) {
String userdir = System.getProperty("user.dir") + File.separator + "conf" + File.separator;
userName = "austin";
userKeytabFile = userdir + "user.keytab";
krb5File = userdir + "krb5.conf";
/*
* if need to connect zk, please provide jaas info about zk. of course,
* you can do it as below:
* System.setProperty("java.security.auth.login.config", confDirPath +
* "jaas.conf"); but the demo can help you more : Note: if this process
* will connect more than one zk cluster, the demo may be not proper. you
* can contact us for more help
*/
LoginUtil.setJaasConf(ZOOKEEPER_DEFAULT_LOGIN_CONTEXT_NAME, userName, userKeytabFile);
LoginUtil.setZookeeperServerPrincipal(ZOOKEEPER_SERVER_PRINCIPAL_KEY,
ZOOKEEPER_DEFAULT_SERVER_PRINCIPAL);
LoginUtil.login(userName, userKeytabFile, krb5File, conf);
}
}
private static void init() throws IOException {
// Default load from conf directory
conf = HBaseConfiguration.create();
String userdir = System.getProperty("user.dir") + File.separator + "conf" + File.separator;
conf.addResource(new Path(userdir + "core-site.xml"));
conf.addResource(new Path(userdir + "hdfs-site.xml"));
conf.addResource(new Path(userdir + "hbase-site.xml"));
}
}