1.插入数据库
@Test
public void poolinserts() {
    // Properly typed list (original had the broken raw declaration "ListpoolInserts").
    List<PoolInsert> poolInserts = new ArrayList<>();
    final Connection conn = HbaseUtils.conn();
    final String tableName = "ns1:t2";
    ThreadPoolExecutor pool = ThreadPool.getPool();
    // NOTE(review): removed the unused 'fixPool' local (ThreadPool.getFixPool() was never used).
    poolInserts.add(new PoolInsert(0, 300, tableName, conn));
    poolInserts.add(new PoolInsert(300, 600, tableName, conn));
    poolInserts.add(new PoolInsert(600, 900, tableName, conn));
    poolInserts.add(new PoolInsert(900, 1000, tableName, conn));
    try {
        for (PoolInsert p : poolInserts) {
            pool.execute(p);
        }
        // Shut down once, then block until the submitted tasks finish —
        // the original busy-waited, calling shutdown() on every loop iteration.
        pool.shutdown();
        if (!pool.awaitTermination(10, TimeUnit.MINUTES)) {
            pool.shutdownNow();
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag and stop the pool promptly.
        Thread.currentThread().interrupt();
        pool.shutdownNow();
    } finally {
        // The original leaked the HBase connection; close it when all tasks are done.
        if (conn != null) {
            try {
                conn.close();
            } catch (IOException ignored) {
                // best-effort close on test teardown
            }
        }
    }
    System.out.println("成功");
}
2.hbase连接工具
/**
 * Creates an HBase {@link Connection} from the classpath configuration
 * (hbase-site.xml etc.).
 *
 * @return a new connection, or {@code null} if creation failed
 *         (the error is logged). NOTE(review): callers in this file do not
 *         null-check — consider throwing an unchecked exception instead.
 */
public static Connection conn() {
    Configuration conf = HBaseConfiguration.create();
    try {
        return ConnectionFactory.createConnection(conf);
    } catch (IOException e) {
        // SLF4J: pass the throwable as the last argument WITHOUT a "{}" placeholder —
        // the original "连接异常:{}" left the placeholder unfilled because the
        // exception is consumed as the throwable parameter.
        log.error("连接异常", e);
    }
    return null;
}
3.1 hdfs-site.xml配置
dfs.replication
3
dfs.nameservices
mycluster
dfs.ha.namenodes.mycluster
nn1,nn2
dfs.namenode.rpc-address.mycluster.nn1
s226:8020
dfs.namenode.rpc-address.mycluster.nn2
s229:8020
dfs.namenode.http-address.mycluster.nn1
s226:50070
dfs.namenode.http-address.mycluster.nn2
s229:50070
dfs.namenode.shared.edits.dir
qjournal://s227:8485;s228:8485;s229:8485/mycluster
dfs.client.failover.proxy.provider.mycluster
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.ha.fencing.methods
sshfence
shell(/bin/true)
dfs.ha.fencing.ssh.private-key-files
/home/centos/.ssh/id_rsa
dfs.journalnode.edits.dir
/home/centos/hadoop/journal
dfs.ha.automatic-failover.enabled
true
Whether automatic failover is enabled. See the HDFS High
Availability documentation for details on automatic HA
configuration.
3.2 hbase-site.xml配置文件
hbase.cluster.distributed
true
hbase.rootdir
hdfs://mycluster/hbase
hbase.zookeeper.quorum
s227:2181,s228:2181,s229:2181
hbase.zookeeper.property.dataDir
/home/centos/zookeeper
4.需要插入的对象封装
@Slf4j
@Data
public class PoolInsert implements Runnable{
private int start;
private int end;
private String tableName;
private Connection conn;
public PoolInsert(int start, int end, String tableName, Connection conn) {
this.start = start;
this.end = end;
this.tableName = tableName;
this.conn = conn;
}
@Override
public void run() {
System.out.println(1111);
//inset(start,end,tableName,conn);
try {
TableName tname = TableName.valueOf(tableName);
HTable table = (HTable) conn.getTable(tname);
DecimalFormat df = new DecimalFormat("0000");
table.setAutoFlush(false);
for (int j = start; j < end; j++) {
byte[] rowkey = Bytes.toBytes("row" + df.format(j));
Put put = new Put(rowkey);
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("id"), Bytes.toBytes(end - 1));
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("name"), Bytes.toBytes("name" + j));
table.put(put);
if (j % 2000 == 0) {
table.flushCommits();
}
}
table.flushCommits();
}catch(Exception e){
log.error("插入异常:{}",e);
}
System.out.println(2222);
}
//百万插入
private String inset(int start, int end, String tableName, Connection conn ) {
try {
TableName tname = TableName.valueOf(tableName);
HTable table = (HTable) conn.getTable(tname);
DecimalFormat df = new DecimalFormat("0000");
table.setAutoFlush(false);
for (int j = start; j < end; j++) {
byte[] rowkey = Bytes.toBytes("row" + df.format(j));
Put put = new Put(rowkey);
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("id"), Bytes.toBytes(end - 1));
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("name"), Bytes.toBytes("name" + j));
table.put(put);
if (j % 2000 == 0) {
table.flushCommits();
}
}
table.flushCommits();
}catch(Exception e){
log.error("插入异常:{}",e);
}
return "完成:"+(end-start);
}
}