My HBase Journey

I have been learning HBase recently and read a lot of material. The main references:
[url]http://blog.chinaunix.net/u3/102568/article_144792.html[/url]
[url]http://blog.csdn.net/dajuezhao/category/724896.aspx[/url]
[url]http://www.javabloger.com/article/apache-hbase-shell-and-install-key-value.html[/url]
If you work through everything in those three links, you should have a solid grasp of the basic HBase concepts.
Now, a few words about my environment:
cygwin + hadoop-0.20.2 + zookeeper-3.3.2 + hbase-0.20.6 (+ eclipse3.6)
I won't go into the configuration details here; there are plenty of guides online, and careful attention is all it takes.
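For reference only, a minimal pseudo-distributed hbase-site.xml might look like the sketch below. The HDFS address (localhost:9000) and the /hbase path are assumptions and must match fs.default.name in your Hadoop setup:

<configuration>
  <property>
    <name>hbase.rootdir</name>
    <!-- assumes HDFS runs on localhost:9000; adjust to your own cluster -->
    <value>hdfs://localhost:9000/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>127.0.0.1</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
</configuration>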
Assuming everything is configured, start the services. The startup order matters:
1. hadoop: ./start-all.sh
2. zookeeper: ./zkServer.sh start
3. hbase: ./start-hbase.sh
Stopping also has a required order, the reverse: hbase -> zookeeper -> hadoop.
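With the standard scripts that ship with each project, the shutdown sequence comes down to:
1. hbase: ./stop-hbase.sh
2. zookeeper: ./zkServer.sh stop
3. hadoop: ./stop-all.sh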

Screenshots after a successful start:
http://localhost:60010/master.jsp (the HBase master status page)
[img]http://dl.iteye.com/upload/attachment/352426/358fe531-0f88-3923-92a6-de986738caf5.jpg[/img]
[img]http://dl.iteye.com/upload/attachment/352424/b61e93c4-dbd7-3163-9514-57b8c24aeece.jpg[/img]

Next comes the Java code for working with HBase; I wrote simple create, insert, update, delete, and query operations:

package org.test;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Scanner;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;

public class TestHBase {
    public final static String COLENDCHAR = String.valueOf(KeyValue.COLUMN_FAMILY_DELIMITER);// ":"
    final String key_colName = "colN";
    final String key_colCluster = "colClut";
    final String key_colDataType = "colDT";
    final String key_colVal = "colV";
    // HBase configuration and admin handle
    HBaseConfiguration conf;
    HBaseAdmin admin = null;
    /**
     * @param args
     */
    public static void main(String[] args) {
        TestHBase app = new TestHBase();

        //app.test();

        app.init();
        app.go();
        app.list();
    }

    void list() {
        try {
            String tableName = "htcjd0";
            Map<String, String> rsMap = this.getHTData(tableName);
            System.out.println(rsMap.toString());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    void go() {
        try {
            // create the table
            String tableName = "htcjd0";
            String[] columns = new String[]{"col"};
            this.createHTable(tableName, columns);
            // build the rows to insert
            List<List<Map<String, String>>> list = new ArrayList<List<Map<String, String>>>();
            List<Map<String, String>> rowList = null;
            Map<String, String> rowMap = null;
            for (int i = 0; i < 10; i++) {
                rowList = new ArrayList<Map<String, String>>();

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "col");
                //rowMap.put(key_colCluster, "cl_name");
                rowMap.put(key_colVal, "陈杰堆nocluster" + i);
                rowList.add(rowMap);

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "col");
                rowMap.put(key_colCluster, "cl_name");
                rowMap.put(key_colVal, "陈杰堆cl_" + i);
                rowList.add(rowMap);

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "col");
                rowMap.put(key_colCluster, "cl_age");
                rowMap.put(key_colVal, "cl_" + i);
                rowList.add(rowMap);

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "col");
                rowMap.put(key_colCluster, "cl_sex");
                rowMap.put(key_colVal, "列cl_" + i);
                rowList.add(rowMap);

                list.add(rowList);
            }
            HTable hTable = this.getHTable(tableName);
            this.insertRow(hTable, list);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    void go0() {
        try {
            // create the table
            String tableName = "htcjd";
            String[] columns = new String[]{"name", "age", "col"};
            this.createHTable(tableName, columns);
            // build the rows to insert: each column gets its own map,
            // since reusing one map would overwrite the earlier entries
            List<List<Map<String, String>>> list = new ArrayList<List<Map<String, String>>>();
            List<Map<String, String>> rowList = null;
            Map<String, String> rowMap = null;
            for (int i = 0; i < 10; i++) {
                rowList = new ArrayList<Map<String, String>>();

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "name");
                rowMap.put(key_colVal, "测试hbase" + i);
                rowList.add(rowMap);

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "age");
                rowMap.put(key_colVal, "" + i);
                rowList.add(rowMap);

                rowMap = new HashMap<String, String>();
                rowMap.put(key_colName, "col");
                rowMap.put(key_colVal, "列" + i);
                rowList.add(rowMap);

                list.add(rowList);
            }
            HTable hTable = this.getHTable(tableName);
            this.insertRow(hTable, list);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    void init() {
        try {
            Configuration HBASE_CONFIG = new Configuration();
            HBASE_CONFIG.set("hbase.zookeeper.quorum", "127.0.0.1");
            HBASE_CONFIG.set("hbase.zookeeper.property.clientPort", "2181");
            this.conf = new HBaseConfiguration(HBASE_CONFIG);// new HBaseConfiguration();
            this.admin = new HBaseAdmin(conf);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Create a table descriptor
     * @param tableName
     * @return
     * @throws Exception
     */
    HTableDescriptor createHTDesc(final String tableName) throws Exception {
        try {
            return new HTableDescriptor(tableName);
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Normalize a column name for HBase. A column is either "course:"
     * (family only, ending with the colon) or "course:math" (family plus
     * qualifier).
     * @param colName the column family
     * @param cluster the qualifier, appended after the colon
     * @return
     */
    String fixColName(String colName, String cluster) {
        if (cluster != null && cluster.trim().length() > 0 && colName.endsWith(cluster)) {
            return colName;
        }
        String tmp = colName;
        int index = colName.indexOf(COLENDCHAR);
        //int leng = colName.length();
        if (index == -1) {
            tmp += COLENDCHAR;
        }
        // append the qualifier directly
        if (cluster != null && cluster.trim().length() > 0) {
            tmp += cluster;
        }
        return tmp;
    }

    String fixColName(String colName) {
        return this.fixColName(colName, null);
    }

    /**
     * Create a column family descriptor. The stored name carries a trailing
     * colon; qualifiers can be appended after it later if the schema needs
     * to grow.
     * @param colName
     * @return
     * @throws Exception
     */
    HColumnDescriptor createHCDesc(String colName) throws Exception {
        try {
            String tmp = this.fixColName(colName);
            byte[] colNameByte = Bytes.toBytes(tmp);
            return new HColumnDescriptor(colNameByte);
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Add a column family to the table descriptor, without a qualifier
     * @param htdesc
     * @param colName
     * @param readonly
     * @throws Exception
     */
    void addFamily(HTableDescriptor htdesc, String colName, final boolean readonly) throws Exception {
        try {
            htdesc.addFamily(this.createHCDesc(colName));
            htdesc.setReadOnly(readonly);
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Delete a column family, without a qualifier
     * @param tableName
     * @param colName
     * @throws Exception
     */
    void removeFamily(String tableName, String colName) throws Exception {
        try {
            String tmp = this.fixColName(colName);
            this.admin.deleteColumn(tableName, tmp);
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Delete a column, with a qualifier
     * @param tableName
     * @param colName
     * @param cluster
     * @throws Exception
     */
    void removeFamily(String tableName, String colName, String cluster) throws Exception {
        try {
            String tmp = this.fixColName(colName, cluster);
            this.admin.deleteColumn(tableName, tmp);
        } catch (Exception e) {
            throw e;
        }
    }
    /**
     * Create a table with no column families
     * @param tableName
     * @throws Exception
     */
    void createHTable(String tableName) throws Exception {
        try {
            if (admin.tableExists(tableName)) return;// skip if the table already exists
            HTableDescriptor htdesc = this.createHTDesc(tableName);
            admin.createTable(htdesc);
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Create a table with the given column families
     * @param tableName
     * @param columns
     * @throws Exception
     */
    void createHTable(String tableName, String[] columns) throws Exception {
        try {
            if (admin.tableExists(tableName)) return;// skip if the table already exists
            HTableDescriptor htdesc = this.createHTDesc(tableName);
            for (int i = 0; i < columns.length; i++) {
                String colName = columns[i];
                this.addFamily(htdesc, colName, false);
            }
            admin.createTable(htdesc);
        } catch (Exception e) {
            throw e;
        }
    }
    /**
     * Drop a table
     * @param tableName
     * @throws Exception
     */
    void removeHTable(String tableName) throws Exception {
        try {
            admin.disableTable(tableName);// disable first
            admin.deleteTable(tableName);// then delete
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Get a handle to a table
     * @param tableName
     * @return
     * @throws Exception
     */
    HTable getHTable(String tableName) throws Exception {
        try {
            return new HTable(conf, tableName);
        } catch (Exception e) {
            throw e;
        }
    }

    void updateColumn(String tableName, String rowID, String colName, String cluster, String value) throws Exception {
        try {
            BatchUpdate batchUpdate = new BatchUpdate(rowID);
            String tmp = this.fixColName(colName, cluster);
            batchUpdate.put(tmp, Bytes.toBytes(value));

            HTable hTable = this.getHTable(tableName);
            hTable.commit(batchUpdate);
        } catch (Exception e) {
            throw e;
        }
    }

    void updateColumn(String tableName, String rowID, String colName, String value) throws Exception {
        try {
            this.updateColumn(tableName, rowID, colName, null, value);
        } catch (Exception e) {
            throw e;
        }
    }

    void deleteColumn(String tableName, String rowID, String colName, String cluster) throws Exception {
        try {
            BatchUpdate batchUpdate = new BatchUpdate(rowID);
            String tmp = this.fixColName(colName, cluster);
            batchUpdate.delete(tmp);
            HTable hTable = this.getHTable(tableName);
            hTable.commit(batchUpdate);
        } catch (Exception e) {
            throw e;
        }
    }

    void deleteColumn(String tableName, String rowID, String colName) throws Exception {
        try {
            this.deleteColumn(tableName, rowID, colName, null);
        } catch (Exception e) {
            throw e;
        }
    }
    /**
     * Get the value of one column in one row
     * @param tableName
     * @param rowID
     * @param colName
     * @param cluster
     * @return
     * @throws Exception
     */
    String getColumnValue(String tableName, String rowID, String colName, String cluster) throws Exception {
        try {
            String tmp = this.fixColName(colName, cluster);
            HTable hTable = this.getHTable(tableName);
            Cell cell = hTable.get(rowID, tmp);
            if (cell == null) return null;
            return new String(cell.getValue());
        } catch (Exception e) {
            throw e;
        }
    }

    /**
     * Get the values of one column across all rows
     * @param tableName
     * @param colName
     * @param cluster if empty, the whole column family is scanned
     * @return
     * @throws Exception
     */
    Map<String, String> getColumnValue(String tableName, String colName, String cluster) throws Exception {
        Scanner scanner = null;
        try {
            String tmp = this.fixColName(colName, cluster);
            HTable hTable = this.getHTable(tableName);
            scanner = hTable.getScanner(new String[] { tmp });// "myColumnFamily:columnQualifier1"
            RowResult rowResult = scanner.next();
            Map<String, String> resultMap = new HashMap<String, String>();
            String row;
            Cell cell = null;
            while (rowResult != null) {
                row = new String(rowResult.getRow());
                cell = rowResult.get(Bytes.toBytes(tmp));
                if (cell == null) {
                    resultMap.put(row, null);
                } else {
                    resultMap.put(row, new String(cell.getValue()));
                }
                rowResult = scanner.next();
            }

            return resultMap;
        } catch (Exception e) {
            throw e;
        } finally {
            if (scanner != null) {
                scanner.close();// always close the scanner
            }
        }
    }

    /**
     * Fetch all data in a table
     * @param tableName
     * @return Map of "family:qualifier" to value
     * @throws Exception
     */
    public Map<String, String> getHTData(String tableName) throws Exception {
        ResultScanner rs = null;
        try {
            HTable table = new HTable(this.conf, tableName);
            Scan s = new Scan();
            rs = table.getScanner(s);
            Map<String, String> resultMap = new HashMap<String, String>();
            for (Result r : rs) {
                for (KeyValue kv : r.raw()) {
                    resultMap.put(new String(kv.getColumn()),
                            new String(kv.getValue()));
                }
            }
            return resultMap;
        } catch (Exception e) {
            throw e;
        } finally {
            if (rs != null)
                rs.close();
        }
    }

    // insert rows
    void insertRow(HTable table, List<List<Map<String, String>>> dataList) throws Exception {
        try {
            Put put = null;
            String colName = null;
            String colCluster = null;
            String colDataType = null;
            byte[] value;
            List<Map<String, String>> rowDataList = null;
            Map<String, String> rowDataMap = null;
            for (Iterator<List<Map<String, String>>> iterator = dataList.iterator(); iterator.hasNext();) {
                rowDataList = iterator.next();
                for (int i = 0; i < rowDataList.size(); i++) {
                    rowDataMap = rowDataList.get(i);
                    colName = rowDataMap.get(key_colName);
                    colCluster = rowDataMap.get(key_colCluster);
                    colDataType = rowDataMap.get(key_colDataType);
                    String val = rowDataMap.get(key_colVal);
                    value = Bytes.toBytes(String.valueOf(val));
//                    // convert according to the declared data type
//                    if ("string".equalsIgnoreCase(colDataType)) {
//                        value = Bytes.toBytes(val);
//                    } else if ("int".equalsIgnoreCase(colDataType)) {
//                        value = Bytes.toBytes(Integer.parseInt(val));
//                    } else if ("float".equalsIgnoreCase(colDataType)) {
//                        value = Bytes.toBytes(Float.parseFloat(val));
//                    } else if ("long".equalsIgnoreCase(colDataType)) {
//                        value = Bytes.toBytes(Long.parseLong(val));
//                    } else if ("double".equalsIgnoreCase(colDataType)) {
//                        value = Bytes.toBytes(Double.parseDouble(val));
//                    } else if ("char".equalsIgnoreCase(colDataType)) {
//                        value = Bytes.toBytes(val);
//                    }
                    // note: the cell value doubles as the row key here,
                    // so every cell lands in a row keyed by its own value
                    put = new Put(value);
                    String tmp = this.fixColName(colName, colCluster);
                    byte[] colNameByte = Bytes.toBytes(tmp);
                    byte[][] famAndQf = KeyValue.parseColumn(colNameByte);
                    put.add(famAndQf[0], famAndQf[1], value);
                    table.put(put);
                }
            }
        } catch (Exception e) {
            throw e;
        }
    }
    // TODO: fetch the table's schema information

}
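main() above only exercises init(), table creation with inserts, and a full scan. As a minimal sketch of how the remaining helpers could be driven (the row key "陈杰堆nocluster0" is an assumption that follows from insertRow using each cell's value as its row key):

TestHBase app = new TestHBase();
app.init();
try {
    // overwrite one cell: family "col", qualifier "cl_name"
    app.updateColumn("htcjd0", "陈杰堆nocluster0", "col", "cl_name", "updated");
    // read the cell back and print it
    System.out.println(app.getColumnValue("htcjd0", "陈杰堆nocluster0", "col", "cl_name"));
    // delete the cell again
    app.deleteColumn("htcjd0", "陈杰堆nocluster0", "col", "cl_name");
} catch (Exception e) {
    e.printStackTrace();
}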

Running the class in Eclipse produces the following:


[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:zookeeper.version=3.3.2-1031432, built on 11/05/2010 05:32 GMT
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:host.name=chenjiedui
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.version=1.6.0_05
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.vendor=Sun Microsystems Inc.
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.home=D:\jdk1.6.0_05\jre
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.class.path=D:\workspace\MyHadoopApp\bin;D:\workspace\MyHadoopApp\lib\commons-lang-2.4.jar;D:\workspace\MyHadoopApp\lib\commons-logging-1.1.1.jar;D:\workspace\Hadoop0.20.2\bin;D:\workspace\Hadoop0.20.2\lib\commons-cli-1.2.jar;D:\workspace\Hadoop0.20.2\lib\commons-codec-1.3.jar;D:\workspace\Hadoop0.20.2\lib\commons-el-1.0.jar;D:\workspace\Hadoop0.20.2\lib\commons-httpclient-3.0.1.jar;D:\workspace\Hadoop0.20.2\lib\commons-logging-1.0.4.jar;D:\workspace\Hadoop0.20.2\lib\commons-logging-api-1.0.4.jar;D:\workspace\Hadoop0.20.2\lib\commons-net-1.4.1.jar;D:\workspace\Hadoop0.20.2\lib\core-3.1.1.jar;D:\workspace\Hadoop0.20.2\lib\hsqldb-1.8.0.10.jar;D:\workspace\Hadoop0.20.2\lib\jasper-compiler-5.5.12.jar;D:\workspace\Hadoop0.20.2\lib\jasper-runtime-5.5.12.jar;D:\workspace\Hadoop0.20.2\lib\jets3t-0.6.1.jar;D:\workspace\Hadoop0.20.2\lib\jetty-6.1.14.jar;D:\workspace\Hadoop0.20.2\lib\jetty-util-6.1.14.jar;D:\workspace\Hadoop0.20.2\lib\junit-3.8.1.jar;D:\workspace\Hadoop0.20.2\lib\kfs-0.2.2.jar;D:\workspace\Hadoop0.20.2\lib\log4j-1.2.15.jar;D:\workspace\Hadoop0.20.2\lib\mockito-all-1.8.0.jar;D:\workspace\Hadoop0.20.2\lib\oro-2.0.8.jar;D:\workspace\Hadoop0.20.2\lib\servlet-api-2.5-6.1.14.jar;D:\workspace\Hadoop0.20.2\lib\slf4j-api-1.4.3.jar;D:\workspace\Hadoop0.20.2\lib\slf4j-log4j12-1.4.3.jar;D:\workspace\Hadoop0.20.2\lib\xmlenc-0.52.jar;D:\workspace\Hadoop0.20.2\lib\ant.jar;D:\workspace\Hadoop0.20.2\lib\jsp-2.1.jar;D:\workspace\Hadoop0.20.2\lib\jsp-api-2.1.jar;D:\workspace\Hadoop0.20.2\lib\ftplet-api-1.0.0-SNAPSHOT.jar;D:\workspace\Hadoop0.20.2\lib\ftpserver-core-1.0.0-SNAPSHOT.jar;D:\workspace\Hadoop0.20.2\lib\ftpserver-server-1.0.0-SNAPSHOT.jar;D:\workspace\Hadoop0.20.2\lib\libthrift.jar;D:\workspace\Hadoop0.20.2\lib\mina-core-2.0.0-M2-20080407.124109-12.jar;D:\workspace\Hadoop0.20.2\libs\lucene\lucene-core-3.0.1.jar;D:\workspace\HBase0.20.6\bin;D:\workspace\HBase0.20.6\lib\commons-cli-2.0-SNAPSHOT.jar;D:\workspace\HBase0.20.6\lib\commons-el-from-jetty-5.1.4.jar;D:\workspace\HBase0.20.6\lib\commons-httpclient-3.0.1.jar;D:\workspace\HBase0.20.6\lib\commons-logging-1.0.4.jar;D:\workspace\HBase0.20.6\lib\commons-logging-api-1.0.4.jar;D:\workspace\HBase0.20.6\lib\commons-math-1.1.jar;D:\workspace\HBase0.20.6\lib\hadoop-0.20.2-core.jar;D:\workspace\HBase0.20.6\lib\jasper-compiler-5.5.12.jar;D:\workspace\HBase0.20.6\lib\jasper-runtime-5.5.12.jar;D:\workspace\HBase0.20.6\lib\jetty-6.1.14.jar;D:\workspace\HBase0.20.6\lib\jetty-util-6.1.14.jar;D:\workspace\HBase0.20.6\lib\jruby-complete-1.2.0.jar;D:\workspace\HBase0.20.6\lib\junit-4.8.1.jar;D:\workspace\HBase0.20.6\lib\libthrift-r771587.jar;D:\workspace\HBase0.20.6\lib\log4j-1.2.15.jar;D:\workspace\HBase0.20.6\lib\lucene-core-2.2.0.jar;D:\workspace\HBase0.20.6\lib\servlet-api-2.5-6.1.14.jar;D:\workspace\HBase0.20.6\lib\xmlenc-0.52.jar;D:\workspace\HBase0.20.6\lib\zookeeper-3.3.2.jar;D:\workspace\MyHadoopApp\lib\commons-cli-2.0-SNAPSHOT.jar;D:\workspace\MyHadoopApp\lib\log4j-1.2.15.jar;D:\workspace\MyHadoopApp\lib\hbase\commons-el-from-jetty-5.1.4.jar;D:\workspace\MyHadoopApp\lib\hbase\commons-httpclient-3.0.1.jar;D:\workspace\MyHadoopApp\lib\hbase\commons-logging-api-1.0.4.jar;D:\workspace\MyHadoopApp\lib\hbase\commons-math-1.1.jar;D:\workspace\MyHadoopApp\lib\hbase\jasper-compiler-5.5.12.jar;D:\workspace\MyHadoopApp\lib\hbase\jasper-runtime-5.5.12.jar;D:\workspace\MyHadoopApp\lib\hbase\jetty-6.1.14.jar;D:\workspace\MyHadoopApp\lib\hbase\jetty-util-6.1.14.jar;D:\workspace\MyHadoopApp\lib\hbas
e\jruby-complete-1.2.0.jar;D:\workspace\MyHadoopApp\lib\hbase\libthrift-r771587.jar;D:\workspace\MyHadoopApp\lib\hbase\lucene-core-2.2.0.jar;D:\workspace\MyHadoopApp\lib\hbase\servlet-api-2.5-6.1.14.jar;D:\workspace\MyHadoopApp\lib\hbase\xmlenc-0.52.jar;D:\workspace\MyHadoopApp\lib\hbase\zookeeper-3.3.2.jar
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.library.path=D:\jdk1.6.0_05\bin;.;C:\WINDOWS\Sun\Java\bin;C:\WINDOWS\system32;C:\WINDOWS;d:/jdk1.6.0_05/bin/../jre/bin/client;d:/jdk1.6.0_05/bin/../jre/bin;d:/jdk1.6.0_05/bin/../jre/lib/i386;D:\cygwin\bin;D:\cygwin\usr\sbin;d:\oracle\product\10.2.0\db_1\bin;d:\jdk1.6.0_05\bin;D:\apache-ant-1.8.0RC1\bin;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\Wbem;C:\Program Files\Intel\WiFi\bin\;C:\Program Files\ThinkPad\ConnectUtilities;C:\Program Files\Common Files\Lenovo;d:\Program Files\cvsnt;C:\Program Files\Common Files\Thunder Network\KanKan\Codecs;C:\Program Files\Common Files\TTKN\Bin;C:\Program Files\StormII\Codec;C:\Program Files\StormII
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.io.tmpdir=C:\DOCUME~1\ADMINI~1\LOCALS~1\Temp\
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:java.compiler=<NA>
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:os.name=Windows XP
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:os.arch=x86
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:os.version=5.1
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:user.name=Administrator
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:user.home=C:\Documents and Settings\Administrator
[hadoop] INFO [main] ZooKeeper.logEnv(97) | Client environment:user.dir=D:\workspace\MyHadoopApp
[hadoop] INFO [main] ZooKeeper.<init>(373) | Initiating client connection, connectString=127.0.0.1:2181 sessionTimeout=60000 watcher=org.apache.hadoop.hbase.client.HConnectionManager$ClientZKWatcher@cd2c3c
[hadoop] INFO [main-SendThread()] ClientCnxn.startConnect(1041) | Opening socket connection to server /127.0.0.1:2181
[hadoop] INFO [main-SendThread(localhost:2181)] ClientCnxn.primeConnection(949) | Socket connection established to localhost/127.0.0.1:2181, initiating session
[hadoop] INFO [main-SendThread(localhost:2181)] ClientCnxn.readConnectResult(738) | Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x12c6c8d8f6d0010, negotiated timeout = 40000
Program output:
{col:cl_name=陈杰堆cl_9, col:name=陈杰堆9, col:sex=列9, col:cl_age=cl_9, col:cl_sex=列cl_9, col:age=9, col:=陈杰堆nocluster9}
[hadoop] INFO [HCM.shutdownHook] ZooKeeper.close(538) | Session: 0x12c6c8d8f6d0010 closed
[hadoop] INFO [main-EventThread] ClientCnxn.run(520) | EventThread shut down
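The table contents can also be double-checked outside of Java from the HBase shell, for example:

./hbase shell
hbase> scan 'htcjd0'
hbase> count 'htcjd0'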