A Complete Hadoop Sample Program

[list]
Preface: when we first started working with Hadoop there was hardly any Chinese documentation. Introductory material is plentiful now, so I won't repeat it here.
Business description: given an inputpath and an outputpath, analyse the access logs to compute, for each application calling each API, the total number of calls and the total traffic, and write the two statistics to two separate output files (a hypothetical sample line is sketched right after this list). Note: this example is adapted from the article by 文初 of Alibaba; he wrote it against version 0.17, and the newer releases changed the API substantially, so I rewrote it against the latest 0.20.2.
[/list]
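To make the field positions concrete, here is a hypothetical sample (the exact log layout is my assumption; the mapper below only relies on fields 1, 2 and 7 of the comma-separated line):

public class SampleLine {
    public static void main(String[] args) {
        // Hypothetical access-log line: only words[1] (appid), words[2] (apiName)
        // and words[7] (bytes) are used by the mapper.
        String line = "2010-05-01 12:00:00,app001,getUserInfo,GET,200,10.0.0.1,8080,512";
        String[] words = line.split(",");
        System.out.println("flow::" + words[1] + "::" + words[2] + "\t" + words[7]); // flow::app001::getUserInfo   512
        System.out.println("count::" + words[1] + "::" + words[2] + "\t" + 1);       // count::app001::getUserInfo  1
    }
}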

import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LogAnalysiser {
    public static class MapClass extends Mapper<LongWritable, Text, Text, LongWritable> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // No custom RecordReader is configured, so the default line reader is used:
            // the key is the offset of the line and the value is the line content.
            String line = value.toString();
            if (line == null || line.equals(""))
                return;
            String[] words = line.split(",");
            if (words == null || words.length < 8)
                return;
            String appid = words[1];
            String apiName = words[2];
            LongWritable recbytes = new LongWritable(Long.parseLong(words[7]));
            Text record = new Text();
            record.set(new StringBuffer("flow::").append(appid)
                    .append("::").append(apiName).toString());
            context.progress();
            // Emit the traffic statistic, marked with the "flow::" prefix.
            context.write(record, recbytes);
            record.clear();
            record.set(new StringBuffer("count::").append(appid).append("::")
                    .append(apiName).toString());
            // Emit the call-count statistic, marked with the "count::" prefix.
            context.write(record, new LongWritable(1));
        }
    }
    public static class PartitionerClass extends Partitioner<Text, LongWritable> {
        @Override
        public int getPartition(Text key, LongWritable value, int numPartitions) {
            // With at least two reducers, route the traffic and call-count
            // statistics to different reducers based on the key prefix.
            if (numPartitions >= 2) {
                if (key.toString().startsWith("flow::"))
                    return 0;
                else
                    return 1;
            } else {
                return 0;
            }
        }
        // The old-API configure(JobConf) hook no longer exists; a new-API
        // Partitioner that needs configuration should implement Configurable.
    }

    public static class ReduceClass extends Reducer<Text, LongWritable, Text, LongWritable> {
        @Override
        public void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            // Strip the "flow::" / "count::" prefix before writing the final key.
            Text newkey = new Text();
            newkey.set(key.toString().substring(key.toString().indexOf("::") + 2));
            LongWritable result = new LongWritable();
            long tmp = 0;
            int counter = 0;
            // Accumulate all values that share the same key.
            for (LongWritable val : values) {
                tmp = tmp + val.get();
                // If processing takes too long and the JobTracker gets no report for a
                // while, it will consider the TaskTracker dead, so report progress periodically.
                counter = counter + 1;
                if (counter == 1000) {
                    counter = 0;
                    context.progress();
                }
            }
            result.set(tmp);
            // Emit the final aggregated result.
            context.write(newkey, result);
        }
    }

    public static class CombinerClass extends Reducer<Text, LongWritable, Text, LongWritable> {
        @Override
        public void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            LongWritable result = new LongWritable();
            long tmp = 0;
            // Pre-aggregate the values for the same key on the map side.
            for (LongWritable val : values) {
                tmp = tmp + val.get();
            }
            result.set(tmp);
            // Emit the partial sum so the reducer can finish the aggregation.
            context.write(key, result);
        }
    }

    public static void main(String[] args) {
        try {
            run(args);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void run(String[] args) throws Exception {
        if (args == null || args.length < 2) {
            System.out.println("need inputpath and outputpath");
            return;
        }
        String inputpath = args[0];
        String outputpath = args[1];
        String shortin = args[0];
        String shortout = args[1];
        if (shortin.indexOf(File.separator) >= 0)
            shortin = shortin.substring(shortin.lastIndexOf(File.separator));
        if (shortout.indexOf(File.separator) >= 0)
            shortout = shortout.substring(shortout.lastIndexOf(File.separator));
        SimpleDateFormat formater = new SimpleDateFormat("yyyy.MM.dd");
        shortout = new StringBuffer(shortout).append("-")
                .append(formater.format(new Date())).toString();
        if (!shortin.startsWith("/"))
            shortin = "/" + shortin;
        if (!shortout.startsWith("/"))
            shortout = "/" + shortout;
        shortin = "/user/root" + shortin;
        shortout = "/user/root" + shortout;
        File inputdir = new File(inputpath);
        File outputdir = new File(outputpath);
        if (!inputdir.exists() || !inputdir.isDirectory()) {
            System.out.println("inputpath not exist or isn't dir!");
            return;
        }
        if (!outputdir.exists()) {
            new File(outputpath).mkdirs();
        }

        Configuration conf = new Configuration();
        Job job = new Job(conf, "analysisjob");
        job.setJarByClass(LogAnalysiser.class);

        FileSystem fileSys = FileSystem.get(conf);
        // Copy the input files from the local file system into HDFS.
        fileSys.copyFromLocalFile(new Path(inputpath), new Path(shortin));

        job.setOutputKeyClass(Text.class);           // Output key type, checked by the OutputFormat.
        job.setOutputValueClass(LongWritable.class); // Output value type, checked by the OutputFormat.
        job.setMapperClass(MapClass.class);
        job.setCombinerClass(CombinerClass.class);
        job.setReducerClass(ReduceClass.class);
        job.setPartitionerClass(PartitionerClass.class);
        // The old API used job.set("mapred.reduce.tasks", "2"); the new API sets it directly.
        // Two reducers are required so that the traffic and call-count statistics
        // end up in separate output files.
        job.setNumReduceTasks(2);

        FileInputFormat.setInputPaths(job, new Path(shortin));   // Input path in HDFS.
        FileOutputFormat.setOutputPath(job, new Path(shortout)); // Output path in HDFS.

        Date startTime = new Date();
        System.out.println("Job started: " + startTime);
        // JobClient.runJob(job) was the old-API call; the new API submits and waits here.
        job.waitForCompletion(true);
        Date end_time = new Date();
        System.out.println("Job ended: " + end_time);
        System.out.println("The job took "
                + (end_time.getTime() - startTime.getTime()) / 1000
                + " seconds.");

        // Copy the result back to the local file system, then remove the
        // temporary input and output directories in HDFS.
        fileSys.copyToLocalFile(new Path(shortout), new Path(outputpath));
        fileSys.delete(new Path(shortin), true);
        fileSys.delete(new Path(shortout), true);
    }
}
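With setNumReduceTasks(2) and the PartitionerClass above, the HDFS output directory (which run() copies back to outputpath) should contain two reducer output files. The contents shown here are only an illustration of the layout, based on the hypothetical sample line above:

part-r-00000    (flow:: keys, total bytes)    e.g.  app001::getUserInfo    512
part-r-00001    (count:: keys, total calls)   e.g.  app001::getUserInfo    1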




// Driver class: registers the job with Hadoop's ProgramDriver.
import org.apache.hadoop.util.ProgramDriver;

public class ExampleDriver {
    public static void main(String[] argv) {
        ProgramDriver pgd = new ProgramDriver();
        try {
            pgd.addClass("analysislog", LogAnalysiser.class,
                    "A map/reduce program that analyses logs.");
            pgd.driver(argv);
        } catch (Throwable e) {
            e.printStackTrace();
        }
    }
}
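Assuming the two classes are packaged into a jar with ExampleDriver as its main class (the jar name here is just an example), the job can be launched by the name registered with ProgramDriver; note that both paths are local, since run() copies the input into HDFS and the result back out:

hadoop jar loganalysiser.jar analysislog /path/to/local/logs /path/to/local/output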