Mapper code
package bulkload;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class BankRecordMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {

    private static final byte[] FAMILY = Bytes.toBytes("C1");

    private ImmutableBytesWritable k2 = new ImmutableBytesWritable();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        // 1. Skip empty lines
        if (line == null || line.trim().isEmpty()) {
            return;
        }
        // 2. Split the CSV line; each record is expected to have 13 comma-separated fields,
        //    with the rowkey in the first column
        String[] fields = line.split(",");
        // 3. Build k2 (the rowkey) and v2 (the Put)
        byte[] rowkey = Bytes.toBytes(fields[0]);
        k2.set(rowkey);
        Put v2 = new Put(rowkey);
        v2.addColumn(FAMILY, Bytes.toBytes("code"), Bytes.toBytes(fields[1]));
        v2.addColumn(FAMILY, Bytes.toBytes("rec_account"), Bytes.toBytes(fields[2]));
        v2.addColumn(FAMILY, Bytes.toBytes("rec_bank_name"), Bytes.toBytes(fields[3]));
        v2.addColumn(FAMILY, Bytes.toBytes("rec_name"), Bytes.toBytes(fields[4]));
        v2.addColumn(FAMILY, Bytes.toBytes("pay_account"), Bytes.toBytes(fields[5]));
        v2.addColumn(FAMILY, Bytes.toBytes("pay_name"), Bytes.toBytes(fields[6]));
        v2.addColumn(FAMILY, Bytes.toBytes("pay_comments"), Bytes.toBytes(fields[7]));
        v2.addColumn(FAMILY, Bytes.toBytes("pay_channel"), Bytes.toBytes(fields[8]));
        v2.addColumn(FAMILY, Bytes.toBytes("pay_way"), Bytes.toBytes(fields[9]));
        v2.addColumn(FAMILY, Bytes.toBytes("status"), Bytes.toBytes(fields[10]));
        v2.addColumn(FAMILY, Bytes.toBytes("timestamp"), Bytes.toBytes(fields[11]));
        v2.addColumn(FAMILY, Bytes.toBytes("money"), Bytes.toBytes(fields[12]));
        // 4. Emit the key/value pair
        context.write(k2, v2);
    }
}
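Before running the job below, the target namespace BANK_RECORD, table RECORD, and column family C1 must already exist, because configureIncrementalLoad asks the cluster for the table's region layout. The table can be created in the HBase shell, or with a small helper like the hypothetical sketch below (it assumes the HBase 2.x Admin API; HBase 1.x uses HTableDescriptor/HColumnDescriptor instead).

package bulkload;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

import java.io.IOException;

// Hypothetical helper, not part of the original job: creates BANK_RECORD:RECORD with family C1
public class CreateRecordTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Create the namespace first; skip this line if BANK_RECORD already exists
            admin.createNamespace(NamespaceDescriptor.create("BANK_RECORD").build());
            // Create the table with a single column family C1
            admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("BANK_RECORD:RECORD"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C1"))
                    .build());
        }
    }
}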
Job code
package bulkload;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

import java.io.IOException;

public class BulkloaderDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Build the HBase configuration first so the job inherits it
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");

        Job job = Job.getInstance(conf, "BankRecordBulkLoad");
        job.setJarByClass(BulkloaderDriver.class);

        // 2. Input: read the CSV source line by line
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.setInputPaths(job, new Path("data/bank_record.csv"));

        // 3. Mapper and its output types
        job.setMapperClass(BankRecordMapper.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);

        // 4. Output: write HFiles to HDFS instead of plain text
        job.setOutputFormatClass(HFileOutputFormat2.class);
        HFileOutputFormat2.setOutputPath(job, new Path("hdfs://node1:8020/bulkLoad/output"));

        // 5. configureIncrementalLoad wires up the sorting reducer, the partitioner
        //    and the number of reduce tasks to match the regions of the target table
        TableName tableName = TableName.valueOf("BANK_RECORD:RECORD");
        boolean flag;
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tableName);
             RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
            HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);

            // 6. Submit the job and wait for it to finish
            flag = job.waitForCompletion(true);
        }
        // 7. Exit with the job status
        System.exit(flag ? 0 : 1);
    }
}
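The driver above only writes HFiles under /bulkLoad/output; a separate load step is still needed to hand them over to the RegionServers. A minimal sketch of that step, assuming the HBase 2.x LoadIncrementalHFiles tool (in HBase 1.x the same class lives in org.apache.hadoop.hbase.mapreduce), could look like this:

package bulkload;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;

// Hypothetical loader, shown only as a sketch of the final bulk-load step
public class BulkLoadData {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");
        TableName tableName = TableName.valueOf("BANK_RECORD:RECORD");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tableName);
             RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
            // Move the generated HFiles into the regions of the target table
            LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
            loader.doBulkLoad(new Path("hdfs://node1:8020/bulkLoad/output"), admin, table, regionLocator);
        }
    }
}

The same step can also be run from the command line with the bundled tool, e.g. hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles hdfs://node1:8020/bulkLoad/output BANK_RECORD:RECORD.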