Version: Hadoop 1.2.1; HBase 0.94.16
The ways of writing data into HBase (see: HBase: The Definitive Guide) can be roughly grouped as follows:
1. Import directly through HTable; the code is as follows:
package hbase.curd;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
public class PutExample {

    private HTable table = HTableUtil.getHTable("testtable");

    public static void main(String[] args) throws IOException {
        PutExample pe = new PutExample();
        pe.putRows();
    }

    /**
     * Writes ten rows to "testtable"; each row receives a random subset of
     * the qualifiers qual1..qual5 in column family colfam1.
     */
    public void putRows() {
        List<Put> puts = new ArrayList<Put>();
        Random random = new Random();
        for (int i = 0; i < 10; i++) {
            Put put = new Put(Bytes.toBytes("row_" + i));
            if (random.nextBoolean()) {
                put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("colfam1_qual1_value_" + i));
            }
            if (random.nextBoolean()) {
                put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("colfam1_qual2_value_" + i));
            }
            if (random.nextBoolean()) {
                put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual3"), Bytes.toBytes("colfam1_qual3_value_" + i));
            }
            if (random.nextBoolean()) {
                put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual4"), Bytes.toBytes("colfam1_qual4_value_" + i));
            }
            if (random.nextBoolean()) {
                put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual5"), Bytes.toBytes("colfam1_qual5_value_" + i));
            }
            if (!put.isEmpty()) { // a Put with no columns would be rejected by table.put()
                puts.add(put);
            }
        }
        try {
            table.put(puts); // send all Puts in one batch
            table.close();
        } catch (Exception e) {
            e.printStackTrace();
            return;
        }
        System.out.println("done put rows");
    }
}
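For larger imports with this direct-HTable approach, the client-side write buffer of the 0.94 HTable API (setAutoFlush / setWriteBufferSize / flushCommits) avoids one RPC per Put. The following is only a minimal sketch, not part of the original example; the class name BufferedPutExample and the row/value layout are made up for illustration:

package hbase.curd;
import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
public class BufferedPutExample {
    public static void main(String[] args) throws IOException {
        HTable table = HTableUtil.getHTable("testtable");
        // Disable auto-flush so individual Puts are buffered on the client side.
        table.setAutoFlush(false);
        // Flush to the region servers once roughly 2 MB of Puts have accumulated.
        table.setWriteBufferSize(2 * 1024 * 1024);
        for (int i = 0; i < 10000; i++) {
            Put put = new Put(Bytes.toBytes("row_" + i));
            put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"),
                    Bytes.toBytes("value_" + i));
            table.put(put); // buffered, not sent immediately
        }
        // flushCommits() sends any remaining buffered Puts; close() also flushes.
        table.flushCommits();
        table.close();
    }
}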
The HTableUtil helper used above is as follows:
package hbase.curd;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
public class HTableUtil {

    private static HTable table;
    private static Configuration conf;

    static {
        conf = HBaseConfiguration.create();
        conf.set("mapred.job.tracker", "hbase:9001");
        conf.set("fs.default.name", "hbase:9000");
        conf.set("hbase.zookeeper.quorum", "hbase");
        try {
            table = new HTable(conf, "testtable");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static Configuration getConf() {
        return conf;
    }

    /**
     * Returns the shared HTable instance. It is created once for "testtable"
     * in the static block; the tablename argument is only used if that
     * initialization failed.
     */
    public static HTable getHTable(String tablename) {
        if (table == null) {
            try {
                table = new HTable(conf, tablename);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return table;
    }

    /** Shorthand for Bytes.toBytes(). */
    public static byte[] gB(String name) {
        return Bytes.toBytes(name);
    }
}
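The examples assume that testtable with column family colfam1 already exists. If it does not, it can be created through the HBaseAdmin client API; the sketch below is illustrative only (the class name CreateTestTable is made up) and simply reuses the configuration from HTableUtil:

package hbase.curd;
import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
public class CreateTestTable {
    public static void main(String[] args) throws IOException {
        HBaseAdmin admin = new HBaseAdmin(HTableUtil.getConf());
        if (!admin.tableExists("testtable")) {
            // Create "testtable" with a single column family "colfam1".
            HTableDescriptor desc = new HTableDescriptor("testtable");
            desc.addFamily(new HColumnDescriptor("colfam1"));
            admin.createTable(desc);
        }
        admin.close();
    }
}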
This first approach does not use MapReduce; the approaches described below all do.
2.1 Import from an HDFS file into HBase with a Mapper subclass; the code is as follows:
package hbase.mr;
import java.io.IOException;
import hbase.curd.HTableUtil;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
/**
 * Imports data from a file on HDFS into HBase using MapReduce.
 */
public class ImportFromFile {

    public static final String NAME = "ImportFromFile";

    public enum Counters { LINES }

    /**
     * Mapper that converts each line of the input file into a Put for the
     * target table.
     */
    static class ImportMapper extends Mapper<LongWritable, Text,
            ImmutableBytesWritable, Writable> {

        private byte[] family = null;
        private byte[] qualifier = null;