关闭

Container exited with a non-zero exit code 1. Failing this attempt. Failing the application.

标签: HADOOP从本地或者HDFS导入数据到大数据HBASE
476人阅读 评论(0) 收藏 举报
分类:

简单的代码实现不进行详细的说明:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

/**
 * MapReduce job that loads tab-separated text lines from HDFS into the HBase
 * table "test3". Each line is keyed by a sequential integer; its 2nd and 3rd
 * tab-separated fields are stored in column family "artitle" under the
 * qualifiers "tile" and "tag".
 */
public class ImportToHbase {

    /**
     * Configures and submits the import job. All cluster settings and paths
     * are hard-coded for the blog's demo environment.
     *
     * @param args unused; configuration is hard-coded below
     * @throws Exception if job configuration or submission fails
     */
    public static void main(String[] args) throws Exception {
        final Configuration configuration = new Configuration();
        // ZooKeeper quorum the HBase client uses to locate the cluster.
        configuration.set("hbase.zookeeper.quorum", "master");
        // Destination table consumed by TableOutputFormat.
        configuration.set(TableOutputFormat.OUTPUT_TABLE, "test3");
        configuration.set("dfs.socket.timeout", "180000");

        // Job.getInstance replaces the deprecated Job(Configuration, String)
        // constructor, so the @SuppressWarnings("deprecation") is no longer needed.
        final Job job = Job.getInstance(configuration, ImportToHbase.class.getSimpleName());
        job.setJarByClass(ImportToHbase.class);
        job.setMapperClass(MyMap.class);
        job.setReducerClass(MyReducer.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TableOutputFormat.class);
        FileInputFormat.setInputPaths(job, "hdfs://master:9000/bbblll");
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Mapper that emits each input line unchanged, keyed by a per-task
     * sequential counter.
     *
     * NOTE(review): the counter is per map task, so row keys are only globally
     * unique when the job runs a single mapper — confirm against the input
     * split count before relying on this for larger inputs.
     */
    static class MyMap extends Mapper<LongWritable, Text, IntWritable, Text> {
        // Reused output value to avoid allocating a Text per record.
        private final Text outValue = new Text();
        // Sequential line counter used as the map output key.
        private int lineCounter = 0;

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The original wrapped this in a catch of NumberFormatException,
            // but nothing here can throw it; the dead catch has been removed.
            lineCounter += 1;
            outValue.set(value.toString());
            context.write(new IntWritable(lineCounter), outValue);
        }
    }

    /**
     * Reducer that converts each line into an HBase Put: row key is the map
     * key's decimal string, columns artitle:tile and artitle:tag come from
     * the line's 2nd and 3rd tab-separated fields.
     */
    static class MyReducer extends TableReducer<IntWritable, Text, NullWritable> {
        @Override
        protected void reduce(IntWritable k2, Iterable<Text> v2s, Context context)
                throws IOException, InterruptedException {
            for (Text text : v2s) {
                final String[] splited = text.toString().split("\t");
                // Guard against malformed rows: the original indexed
                // splited[1]/splited[2] unconditionally, so any line with fewer
                // than 3 tab-separated fields killed the task with
                // ArrayIndexOutOfBoundsException.
                if (splited.length < 3) {
                    System.err.println("Skipping malformed line for key " + k2 + ": " + text);
                    continue;
                }
                final Put put = new Put(Bytes.toBytes(k2.toString()));
                put.add(Bytes.toBytes("artitle"), Bytes.toBytes("tile"), Bytes.toBytes(splited[1]));
                put.add(Bytes.toBytes("artitle"), Bytes.toBytes("tag"), Bytes.toBytes(splited[2]));
                context.write(NullWritable.get(), put);
            }
        }
    }

}

我主要说一下在运行的过程中出现的一些错误:(在 Windows 的 eclipse 中——导入编程所需要的包)


这个错误是由于资源的分配出现的,所以我对于yarn-site.xml和mapred-site.xml进行了修改(这个地方在网上搜了好久,看懂了一些原理,但是没有具体的解决方法)——以下仅说出自己的理由:

mapred-site.xml:


yarn-site.xml:


在之前配置的前面加上vix.

但是在配置之后,由于在HBASE的表的问题上出现了一点小问题,经过对表进行改进,从而达到了目的



0
0

查看评论
* 以上用户言论只代表其个人观点,不代表CSDN网站的观点或立场
    个人资料
    • 访问:7767次
    • 积分:285
    • 等级:
    • 排名:千里之外
    • 原创:19篇
    • 转载:16篇
    • 译文:0篇
    • 评论:1条