HBase in Practice

1. Dependencies (note: the 1.2.0-cdh5.7.2 artifacts are typically resolved from the Cloudera Maven repository rather than Maven Central):

		<!-- Hadoop dependencies -->
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-client</artifactId>
			<version>2.6.0</version>
			<exclusions>
				<exclusion>
					<groupId>tomcat</groupId>
					<artifactId>jasper-compiler</artifactId>
				</exclusion>
				<exclusion>
					<groupId>tomcat</groupId>
					<artifactId>jasper-runtime</artifactId>
				</exclusion>
			</exclusions>
		</dependency>

		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-common</artifactId>
			<version>2.6.0</version>
			<exclusions>
				<exclusion>
					<groupId>tomcat</groupId>
					<artifactId>jasper-compiler</artifactId>
				</exclusion>
				<exclusion>
					<groupId>tomcat</groupId>
					<artifactId>jasper-runtime</artifactId>
				</exclusion>
			</exclusions>
		</dependency>

		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-hdfs</artifactId>
			<version>2.6.0</version>
		</dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
		<dependency>
			<groupId>org.apache.hbase</groupId>
			<artifactId>hbase-client</artifactId>
			<version>1.2.0-cdh5.7.2</version>
		</dependency>

		<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-common -->
		<dependency>
			<groupId>org.apache.hbase</groupId>
			<artifactId>hbase-common</artifactId>
			<version>1.2.0-cdh5.7.2</version>
		</dependency>

 

2. Utility class (references the hbase-site.xml and core-site.xml files placed in the application's source/resources directory):

package com.rz.util;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.commons.collections.CollectionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class HbaseClientUtils {

	public static Configuration configuration = null;
	public static Connection connection = null;
	public static final Logger LOGGER = LoggerFactory.getLogger(HbaseClientUtils.class);

	/**
	 * Lazily initialize the shared Configuration and Connection (reused across calls).
	 */
	public static void init() {
//		System.setProperty("HADOOP_USER_NAME", "hdfs");
		if (configuration == null || connection == null || connection.isClosed()) {
			configuration = HBaseConfiguration.create();
			configuration.addResource("hbase-site.xml");
			configuration.addResource("core-site.xml");
			configuration.setInt("hbase.rpc.timeout", 120000);
			configuration.setInt("hbase.client.operation.timeout", 120000);
			configuration.setInt("hbase.client.scanner.timeout.period", 200000);

			try {
				connection = ConnectionFactory.createConnection(configuration);
			} catch (Exception e1) {
				LOGGER.error("Failed to open the connection ", e1);
			}
		}
	}

	public static void close() {
		if (connection != null) {
			try {
				connection.close();
			} catch (Exception e1) {
				LOGGER.error("Failed to close the connection ", e1);
			}
		}
	}

	/**
	 * Check whether a table exists.
	 * 
	 * @param tableName
	 *            table name
	 * @return true if the table exists
	 * @throws IOException
	 *             on HBase access failure
	 */
	public static boolean isTableExists(String tableName) throws IOException {
		// reuse the shared connection instead of opening (and leaking) a new one
		Admin admin = connection.getAdmin();
		try {
			return admin.tableExists(TableName.valueOf(tableName));
		} finally {
			admin.close();
		}
	}

	/**
	 * Get a Table handle.
	 * 
	 * @param tableName
	 *            table name
	 * @return Table interface
	 * @throws IOException
	 *             on HBase access failure
	 */
	public static Table getTable(String tableName) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		return table;
	}

	/**
	 * Create a table.
	 * 
	 * @param tableName
	 * @param hColumn
	 *            column families
	 * @throws IOException
	 */
	public static void createTable(String tableName, String... hColumn) throws IOException {
		LOGGER.info("start create table ......");
		// reuse the shared connection instead of opening (and leaking) a new one
		Admin admin = connection.getAdmin();
		HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
		for (String hc : hColumn) {
			HColumnDescriptor hcdInfo = new HColumnDescriptor(hc);
			hcdInfo.setMaxVersions(3);
			htd.addFamily(hcdInfo);
		}
		admin.createTable(htd);
		// close the Admin when done
		admin.close();
		LOGGER.info("end create table ......");
	}

	/**
	 * Create an HBase table with the given number of versions.
	 * 
	 * @param tableName
	 *            name of the table to create
	 * @param version
	 *            number of versions to keep; if version <= 0, the default of 3 is used
	 * @param hColumn
	 *            column families
	 * @throws IOException
	 */
	public static void createTableByVersion(String tableName, int version, String... hColumn) throws IOException {
		LOGGER.info("Creating HBase table [" + tableName + "] with max versions: " + version);
		Admin admin = connection.getAdmin();
		HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
		for (String hc : hColumn) {
			HColumnDescriptor hcdInfo = new HColumnDescriptor(hc);
			if (version > 0) {
				hcdInfo.setMaxVersions(version);
			} else {
				hcdInfo.setMaxVersions(3);
			}
			htd.addFamily(hcdInfo);
		}
		admin.createTable(htd);
		// close the Admin when done
		admin.close();
		LOGGER.info("Finished creating HBase table [" + tableName + "]");
	}

	/**
	 * Delete a table.
	 * 
	 * @param tableName
	 * @throws IOException
	 */
	public static void deleteTable(String tableName) throws IOException {
		Admin admin = connection.getAdmin();
		if (admin.tableExists(TableName.valueOf(tableName))) {
			admin.disableTable(TableName.valueOf(tableName));
			LOGGER.info("Disabled table " + tableName);
			admin.deleteTable(TableName.valueOf(tableName));
			LOGGER.info("Table " + tableName + " deleted");
		} else {
			LOGGER.info("Table " + tableName + " does not exist");
		}
		admin.close();
	}

	/**
	 * Truncate a table.
	 * 
	 * @param tableName
	 * @throws IOException
	 */
	public static void truncateTable(String tableName) throws IOException {
		Admin admin = connection.getAdmin();
		if (admin.tableExists(TableName.valueOf(tableName))) {
			admin.disableTable(TableName.valueOf(tableName));
			LOGGER.info("Disabled table " + tableName);
			admin.truncateTable(TableName.valueOf(tableName), true);
			LOGGER.info("Table " + tableName + " truncated");
		} else {
			LOGGER.info("Table " + tableName + " does not exist");
		}
		admin.close();
	}

	/**
	 * Insert a single row (one cell).
	 * 
	 * @param tableName
	 * @param rowkey
	 * @param family
	 * @param qualifier
	 * @param value
	 * @throws IOException
	 */
	public static void insertRecord(String tableName, String rowkey, String family, String qualifier, String value) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		Put put = new Put(rowkey.getBytes());
		put.addColumn(family.getBytes(), qualifier.getBytes(), value.getBytes());
		table.put(put);
		table.close();
		LOGGER.info(tableName + ": inserted row " + rowkey);
	}

	/**
	 * Delete a row.
	 * 
	 * @param tableName
	 * @param rowkey
	 * @throws IOException
	 */
	public static void deleteRecord(String tableName, String rowkey) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		Delete del = new Delete(rowkey.getBytes());
		table.delete(del);
		table.close();
		LOGGER.info(tableName + ": deleted row " + rowkey);
	}

	/**
	 * Get a single row.
	 * 
	 * @param tableName
	 * @param rowkey
	 * @return the Result for the row
	 * @throws IOException
	 */
	public static Result getOneRecord(String tableName, String rowkey) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		Get get = new Get(rowkey.getBytes());
		Result rs = table.get(get);
		return rs;
	}

	/**
	 * Scan the whole table (latest version only).
	 * 
	 * @param tableName
	 * @return a ResultScanner; iterate it and close it yourself:
	 * 
	 *      for (Result r : scanner) {
	 *         list.add(r);
	 *      }
	 *      scanner.close()
	 * @throws IOException
	 */
	public static ResultScanner getAllRecord(String tableName) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		Scan scan = new Scan();
		ResultScanner scanner = table.getScanner(scan);
		return scanner;
	}

	/**
	 * Scan the whole table, returning values for the requested number of versions.
	 * 
	 * @param tableName
	 *            HBase table name
	 * @param version
	 *            number of versions to return; null or 0 returns all versions
	 * @return List<Result>
	 * @throws IOException
	 *             on HBase access failure
	 */
	public static List<Result> getAllRecordByVersion(String tableName, Integer version) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		Scan scan = new Scan();
		if (version == null || version == 0) {
			scan.setMaxVersions();
		} else {
			scan.setMaxVersions(version);
		}
		ResultScanner scanner = table.getScanner(scan);
		List<Result> list = new ArrayList<Result>();
		for (Result r : scanner) {
			list.add(r);
		}
		scanner.close();
		return list;
	}

	/**
	 * Scan the whole table with a larger client-side scanner cache.
	 * 
	 * @param tableName
	 * @return a ResultScanner (caller must close it)
	 * @throws IOException
	 */
	public static ResultScanner getAllRecordByTableName(String tableName) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		Scan scan = new Scan();

		// client-side scanner caching: fetch 1000 rows per RPC
		scan.setCaching(1000);
		ResultScanner scanner = table.getScanner(scan);
		return scanner;
	}

	/**
	 * Update (put) a single column of a row.
	 * 
	 * @param name
	 * @param rowKey
	 * @param familyName
	 * @param columnName
	 * @param value
	 * @throws IOException
	 */
	public static void updateTable(String name, String rowKey, String familyName, String columnName, String value) throws IOException {
		TableName tableName = TableName.valueOf(name);
		Table table = connection.getTable(tableName);

		Put put = new Put(Bytes.toBytes(rowKey));
		put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes(columnName), Bytes.toBytes(value));

		table.put(put);
		table.close();
	}

	/**
	 * Delete a single column of a row (issued as a batch of one Delete).
	 * 
	 * @param tableNameStr
	 * @param family
	 * @param rowKEY
	 * @param column
	 * @throws IOException
	 */
	public static void deleteDataList(String tableNameStr, String family, String rowKEY, String column) throws IOException {
		Table table = null;
		List<Delete> deletes = new ArrayList<Delete>();

		TableName tableName = TableName.valueOf(tableNameStr);
		table = connection.getTable(tableName);

		Delete delete = new Delete(Bytes.toBytes(rowKEY));
		delete.addColumn(family.getBytes(), column.getBytes());
		deletes.add(delete);

		table.delete(deletes);
		table.close();
	}

	/**
	 * Query with a list of filters.
	 * 
	 * @param tableName
	 * @param filters
	 * @return a ResultScanner (caller must close it)
	 * @throws IOException
	 */
	public static ResultScanner queryByFilterList(String tableName, List<Filter> filters) throws IOException {
		init();
		Table table = connection.getTable(TableName.valueOf(tableName));
		FilterList filterList1 = new FilterList(filters);
		Scan scan = new Scan();
		scan.setFilter(filterList1);
		ResultScanner rs = table.getScanner(scan);
		return rs;
	}

	/**
	 * Get a single column of a row by row key.
	 * 
	 * @param tableName
	 * @param rowKey
	 * @param family
	 * @param column
	 * @return the Result
	 * @throws IOException
	 */
	public static Result getRowKeyAndClumn(String tableName, String rowKey, String family, String column) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));

		Get get = new Get(Bytes.toBytes(rowKey));
		get.addColumn(Bytes.toBytes(family), Bytes.toBytes(column));
		Result r = table.get(get);
//		for (KeyValue kv : r.raw()) {
//			System.out.println("column: " + new String(kv.getColumn()));
//			System.out.println("value: " + new String(kv.getValue()));
//		}
		return r;
	}

	/**
	 * Query by conditions.
	 * 
	 * @param tableName
	 * @param columnFilterMap
	 *            Map<familyName, List<columnName>>
	 * @param filters
	 *            List<Filter>
	 * @return a ResultScanner (caller must close it)
	 * @throws IOException
	 */
	public static ResultScanner queryByCondition(String tableName, Map<String, List<String>> columnFilterMap, List<Filter> filters) throws IOException {
		ResultScanner rs = null;
		Table table = connection.getTable(TableName.valueOf(tableName));
		if (CollectionUtils.isNotEmpty(filters)) {

			FilterList filterList = new FilterList(filters);
			Scan scan = new Scan();
			for (String familyName : columnFilterMap.keySet()) {
				List<String> columnList = columnFilterMap.get(familyName);
				if (columnList == null || columnList.isEmpty()) {
					scan.addFamily(Bytes.toBytes(familyName));
				} else {
					for (String cl : columnList) {
						scan.addColumn(Bytes.toBytes(familyName), Bytes.toBytes(cl));
					}
				}
			}
			scan.setFilter(filterList);
			rs = table.getScanner(scan);
		} else {
			// no filters: scan the whole table, returning only the first key of each row
			Scan scan = new Scan();
			scan.setFilter(new FirstKeyOnlyFilter());
			rs = table.getScanner(scan);
		}
		return rs;
	}

	/**
	 * Query with a combination of conditions on one column.
	 * 
	 * @param tableName
	 * @throws IOException
	 */
	public static ResultScanner queryByCondition(String tableName, String family, String qualifier, List<Filter> filters) throws IOException {
		Table table = connection.getTable(TableName.valueOf(tableName));
		FilterList filterList = new FilterList(filters);
//			List<Filter> filters = new ArrayList<Filter>();
//			Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes("family_1"), null, CompareOp.EQUAL, Bytes.toBytes("v001"));
//			filters.add(filter1);
		Scan scan = new Scan();
		scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
		scan.setFilter(filterList);
		ResultScanner rs = table.getScanner(scan);

//			for (Result r : rs) {
//				System.out.println("row key: " + new String(r.getRow()));
//				for (Cell cell : r.rawCells()) {
//					System.out.print(new String(CellUtil.cloneRow(cell)) + " ");
//					System.out.print(new String(CellUtil.cloneFamily(cell)) + ":");
//					System.out.print(new String(CellUtil.cloneQualifier(cell)) + " ");
//					System.out.print(cell.getTimestamp() + " ");
//					System.out.println(new String(CellUtil.cloneValue(cell)));
//				}
//			}
//			rs.close();
		return rs;
	}

	/**
	 * Count rows by scanning with a FirstKeyOnlyFilter (one cell per row).
	 * 
	 * @param tableName
	 * @return the number of rows in the table
	 * @throws IOException
	 */
	public static long rowCount(String tableName) throws IOException {
		long rowCount = 0;
		Table table = connection.getTable(TableName.valueOf(tableName));
		Scan scan = new Scan();
		scan.setFilter(new FirstKeyOnlyFilter());
		ResultScanner resultScanner = table.getScanner(scan);
		for (Result result : resultScanner) {
			rowCount += result.size();
		}
		resultScanner.close();
		return rowCount;
	}

	public static void main(String[] args) throws Throwable {

	}
}
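
A minimal usage sketch for the utility class above. The table name "demo_table", column family "info" and row key "row1" are placeholder values for illustration only:

package com.rz.util;

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HbaseClientUtilsDemo {

	public static void main(String[] args) throws IOException {
		// initialize the shared connection (reads hbase-site.xml / core-site.xml from the classpath)
		HbaseClientUtils.init();

		// create the table if it is missing, write one cell, then read it back
		if (!HbaseClientUtils.isTableExists("demo_table")) {
			HbaseClientUtils.createTable("demo_table", "info");
		}
		HbaseClientUtils.insertRecord("demo_table", "row1", "info", "name", "zhangsan");
		Result r = HbaseClientUtils.getOneRecord("demo_table", "row1");
		System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"))));

		// full-table scan; the caller is responsible for closing the scanner
		ResultScanner scanner = HbaseClientUtils.getAllRecord("demo_table");
		for (Result row : scanner) {
			System.out.println("row key: " + Bytes.toString(row.getRow()));
		}
		scanner.close();

		// release the shared connection
		HbaseClientUtils.close();
	}
}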

core-site.xml:

<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://nameservice1</value>
  </property>
  <property>
    <name>fs.trash.interval</name>
    <value>1</value>
  </property>
  <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec</value>
  </property>
  <property>
    <name>hadoop.security.authentication</name>
    <value>simple</value>
  </property>
  <property>
    <name>hadoop.security.authorization</name>
    <value>false</value>
  </property>
  <property>
    <name>hadoop.rpc.protection</name>
    <value>authentication</value>
  </property>
  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>DEFAULT</value>
  </property>
  <property>
    <name>hadoop.proxyuser.oozie.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.oozie.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.mapred.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.mapred.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.flume.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.flume.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.HTTP.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.HTTP.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hive.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hive.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hue.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hue.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.httpfs.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.httpfs.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hdfs.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hdfs.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.yarn.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.yarn.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.security.group.mapping</name>
    <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
  </property>
  <property>
    <name>hadoop.security.instrumentation.requires.admin</name>
    <value>false</value>
  </property>
  <property>
    <name>hadoop.ssl.require.client.cert</name>
    <value>false</value>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.ssl.keystores.factory.class</name>
    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.ssl.server.conf</name>
    <value>ssl-server.xml</value>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.ssl.client.conf</name>
    <value>ssl-client.xml</value>
    <final>true</final>
  </property>

  <property>
    <name>fs.hdfs.impl</name>
    <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
    <description>The FileSystem for hdfs: uris.</description>
  </property>
  <property>
    <name>fs.file.impl</name>
    <value>org.apache.hadoop.fs.LocalFileSystem</value>
    <description>The FileSystem for file: uris.</description>
  </property>
</configuration>

hbase-site.xml:

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera Manager-->
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://xx.xx.xx.xx:8020/hbase</value>
  </property>
  <property>
    <name>hbase.replication</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.client.write.buffer</name>
    <value>2097152</value>
  </property>
  <property>
    <name>hbase.client.pause</name>
    <value>100</value>
  </property>
  <property>
    <name>hbase.client.retries.number</name>
    <value>35</value>
  </property>
  <property>
    <name>hbase.client.scanner.caching</name>
    <value>100</value>
  </property>
  <property>
    <name>hbase.client.keyvalue.maxsize</name>
    <value>10485760</value>
  </property>
  <property>
    <name>hbase.ipc.client.allowsInterrupt</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.client.primaryCallTimeout.get</name>
    <value>10</value>
  </property>
  <property>
    <name>hbase.client.primaryCallTimeout.multiget</name>
    <value>10</value>
  </property>
  <property>
    <name>hbase.coprocessor.region.classes</name>
    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
  </property>
  <property>
    <name>hbase.regionserver.thrift.http</name>
    <value>false</value>
  </property>
  <property>
    <name>hbase.thrift.support.proxyuser</name>
    <value>false</value>
  </property>
  <property>
    <name>hbase.rpc.timeout</name>
    <value>120000</value>
  </property>
  <property>
    <name>hbase.snapshot.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.snapshot.master.timeoutMillis</name>
    <value>120000</value>
  </property>
  <property>
    <name>hbase.snapshot.region.timeout</name>
    <value>120000</value>
  </property>
  <property>
    <name>hbase.snapshot.master.timeout.millis</name>
    <value>120000</value>
  </property>
  <property>
    <name>hbase.security.authentication</name>
    <value>hdfs</value>
  </property>
  <property>
    <name>hbase.rpc.protection</name>
    <value>authentication</value>
  </property>
  <property>
    <name>zookeeper.session.timeout</name>
    <value>120000</value>
  </property>
  <property>
    <name>zookeeper.znode.parent</name>
    <value>/hbase</value>
  </property>
  <property>
    <name>zookeeper.znode.rootserver</name>
    <value>root-region-server</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>xx.xx.xx.1,xx.xx.xx.2,xx.xx.xx.3</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
  <property>
    <name>hbase.rest.ssl.enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>hbase.coprocessor.user.region.classes</name>
    <value>org.apache.hadoop.hbase.coprocessor.AggregateImplementation</value>
  </property>
  <property>
    <name>hbase.client.ipc.pool.type</name>
    <value>RoundRobinPool</value>
  </property>
  <property>
    <name>hbase.client.ipc.pool.size</name>
    <value>10</value>
  </property>
</configuration>

Additionally, if you need to use HDFS as the data store (for example, Flink syncing data to HDFS in real time), your code must reference the following configuration file or set the equivalent key/value pairs programmatically (a code sketch follows the file below).

hdfs-site.xml:

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera Manager-->
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>nameservice1</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.nameservice1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled.nameservice1</name>
    <value>true</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>dev-hdp002:2181,dev-hdp003:2181,dev-hdp004:2181</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.nameservice1</name>
    <value>namenode48,namenode289</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.nameservice1.namenode48</name>
    <value>dev-hdp002:8020</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.nameservice1.namenode48</name>
    <value>dev-hdp002:8022</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.nameservice1.namenode48</name>
    <value>dev-hdp002:50070</value>
  </property>
  <property>
    <name>dfs.namenode.https-address.nameservice1.namenode48</name>
    <value>dev-hdp002:50470</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.nameservice1.namenode289</name>
    <value>dev-hdp003:8020</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.nameservice1.namenode289</name>
    <value>dev-hdp003:8022</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.nameservice1.namenode289</name>
    <value>dev-hdp003:50070</value>
  </property>
  <property>
    <name>dfs.namenode.https-address.nameservice1.namenode289</name>
    <value>dev-hdp003:50470</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <property>
    <name>dfs.client.use.datanode.hostname</name>
    <value>false</value>
  </property>
  <property>
    <name>fs.permissions.umask-mode</name>
    <value>022</value>
  </property>
  <property>
    <name>dfs.encrypt.data.transfer.algorithm</name>
    <value>3des</value>
  </property>
  <property>
    <name>dfs.encrypt.data.transfer.cipher.suites</name>
    <value>AES/CTR/NoPadding</value>
  </property>
  <property>
    <name>dfs.encrypt.data.transfer.cipher.key.bitlength</name>
    <value>256</value>
  </property>
  <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.use.legacy.blockreader</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hdfs-sockets/dn</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.client.domain.socket.data.traffic</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
    <value>true</value>
  </property>
</configuration>
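
As an alternative to shipping hdfs-site.xml on the classpath, the same key/value pairs can be set programmatically on a Hadoop Configuration. The sketch below is only an illustration, mirroring the nameservice and NameNode addresses from the file above; adjust the hostnames and ports to your cluster:

package com.rz.util;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsConfigDemo {

	public static void main(String[] args) throws IOException {
		Configuration conf = new Configuration();
		// HA nameservice settings, mirroring hdfs-site.xml above
		conf.set("fs.defaultFS", "hdfs://nameservice1");
		conf.set("dfs.nameservices", "nameservice1");
		conf.set("dfs.ha.namenodes.nameservice1", "namenode48,namenode289");
		conf.set("dfs.namenode.rpc-address.nameservice1.namenode48", "dev-hdp002:8020");
		conf.set("dfs.namenode.rpc-address.nameservice1.namenode289", "dev-hdp003:8020");
		conf.set("dfs.client.failover.proxy.provider.nameservice1",
				"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

		// verify that the client can reach the active NameNode
		FileSystem fs = FileSystem.get(conf);
		System.out.println(fs.exists(new Path("/")));
		fs.close();
	}
}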
