HBase 多线程大数据量入库(线程池批量导入)

1.插入数据库

/**
 * Multi-threaded HBase bulk insert: splits the [0, 1000) row range into four
 * chunks and submits one {@link PoolInsert} task per chunk to a shared thread pool,
 * then waits for all tasks to complete.
 */
@Test
public void poolinserts() {
    // Was "ListpoolInserts = new ArrayList();" — missing space and raw type.
    List<PoolInsert> poolInserts = new ArrayList<>();
    final Connection conn = HbaseUtils.conn();
    final String tableName = "ns1:t2";
    ThreadPoolExecutor pool = ThreadPool.getPool();

    // Each task inserts rows [start, end) into the same table over the shared connection.
    poolInserts.add(new PoolInsert(0, 300, tableName, conn));
    poolInserts.add(new PoolInsert(300, 600, tableName, conn));
    poolInserts.add(new PoolInsert(600, 900, tableName, conn));
    poolInserts.add(new PoolInsert(900, 1000, tableName, conn));

    for (PoolInsert p : poolInserts) {
        pool.execute(p);
    }

    // The original busy-waited: while (!pool.isTerminated()) { pool.shutdown(); }
    // — a CPU-spinning loop that also called shutdown() repeatedly. Shut down once
    // and block until the queued tasks finish (or the timeout elapses).
    pool.shutdown();
    try {
        pool.awaitTermination(10, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
    }

    System.out.println("成功");
}

2.hbase连接工具

/**
 * Creates an HBase {@code Connection} from the configuration found on the
 * classpath (hbase-site.xml).
 *
 * @return a new Connection, or {@code null} if the connection could not be
 *     established — callers must check for null before use
 */
public static Connection conn() {
    Configuration conf = HBaseConfiguration.create();
    try {
        return ConnectionFactory.createConnection(conf);
    } catch (IOException e) {
        // Was log.error("连接异常:{}", e): with a Throwable as the last argument the
        // "{}" placeholder is wrong — pass the exception directly so SLF4J logs
        // the full stack trace.
        log.error("连接异常", e);
    }
    return null;
}

3.1 hdfs-site.xml配置

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>s226:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>s229:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>s226:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>s229:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://s227:8485;s228:8485;s229:8485/mycluster</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence
shell(/bin/true)</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/centos/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/centos/hadoop/journal</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>Whether automatic failover is enabled. See the HDFS High
      Availability documentation for details on automatic HA
      configuration.</description>
  </property>
</configuration>

3.2 hbase-site.xml配置文件

<configuration>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://mycluster/hbase</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>s227:2181,s228:2181,s229:2181</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/centos/zookeeper</value>
  </property>
</configuration>

4.需要插入的对象封装

@Slf4j

@Data

public class PoolInsert implements Runnable{

private int start;

private int end;

private String tableName;

private Connection conn;

public PoolInsert(int start, int end, String tableName, Connection conn) {

this.start = start;

this.end = end;

this.tableName = tableName;

this.conn = conn;

}

@Override

public void run() {

System.out.println(1111);

//inset(start,end,tableName,conn);

try {

TableName tname = TableName.valueOf(tableName);

HTable table = (HTable) conn.getTable(tname);

DecimalFormat df = new DecimalFormat("0000");

table.setAutoFlush(false);

for (int j = start; j < end; j++) {

byte[] rowkey = Bytes.toBytes("row" + df.format(j));

Put put = new Put(rowkey);

put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("id"), Bytes.toBytes(end - 1));

put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("name"), Bytes.toBytes("name" + j));

table.put(put);

if (j % 2000 == 0) {

table.flushCommits();

}

}

table.flushCommits();

}catch(Exception e){

log.error("插入异常:{}",e);

}

System.out.println(2222);

}

//百万插入

private String inset(int start, int end, String tableName, Connection conn ) {

try {

TableName tname = TableName.valueOf(tableName);

HTable table = (HTable) conn.getTable(tname);

DecimalFormat df = new DecimalFormat("0000");

table.setAutoFlush(false);

for (int j = start; j < end; j++) {

byte[] rowkey = Bytes.toBytes("row" + df.format(j));

Put put = new Put(rowkey);

put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("id"), Bytes.toBytes(end - 1));

put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("name"), Bytes.toBytes("name" + j));

table.put(put);

if (j % 2000 == 0) {

table.flushCommits();

}

}

table.flushCommits();

}catch(Exception e){

log.error("插入异常:{}",e);

}

return "完成:"+(end-start);

}

}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值