package com.xhb1.test;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.test.UserBroserCount;

/**
 * Submits the browser-count MapReduce job to a remote Hadoop cluster
 * directly from a client machine (e.g. from the IDE), without running
 * "hadoop jar" on the cluster itself.
 */
public class RunBroserCount {

    public static void main(String[] args) throws Exception {
        String inputPath = "/logdata/userLogFile.log";
        String outputPath = "/oozie/test1";
        new RunBroserCount().runBroserCount(inputPath, outputPath);
    }

    public void runBroserCount(String inputPath, String outputPath)
            throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        // Point the client at the remote HDFS NameNode and JobTracker.
        conf.set("fs.default.name", "hdfs://192.168.0.170:4310");
        //conf.set("hadoop.job.user", "root");
        conf.set("mapred.job.tracker", "192.168.0.170:4311");

        Job job = new Job(conf, "browserCount");
        // When submitting from outside the cluster, the job jar must be set
        // explicitly so it is shipped to the cluster. The configuration backing
        // a Job is a JobConf, so the cast below is safe.
        ((JobConf) job.getConfiguration()).setJar("E:/workspaces/pmp/BrowserCount/target/pr.jar");
        job.setJarByClass(UserBroserCount.class);

        // Mapper/Reducer and output types. The map output key class defaults
        // to the job output key class (Text) when it is not set explicitly.
        job.setMapOutputValueClass(IntWritable.class);
        job.setMapperClass(UserBroserCount.UserBroserCountMapper.class);
        job.setReducerClass(UserBroserCount.UserBroserCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Input and output locations on HDFS; the output directory must not exist yet.
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Submit the job and block until it finishes, printing progress to the console.
        job.waitForCompletion(true);
    }
}
Java code for submitting a Hadoop job from a client
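The driver above references UserBroserCount from com.test, which is not shown in the article. Below is a minimal sketch of what such a class could look like, assuming a WordCount-style job in which the mapper extracts a browser name from each log line and the reducer sums the counts; the real log format of userLogFile.log is not given, so the tab-split and field position used here are purely illustrative assumptions.

package com.test;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class UserBroserCount {

    public static class UserBroserCountMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {

        private static final IntWritable ONE = new IntWritable(1);
        private final Text browser = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Hypothetical log layout: tab-separated fields with the browser
            // name in the last column; adjust to the real userLogFile.log format.
            String[] fields = value.toString().split("\t");
            if (fields.length > 0) {
                browser.set(fields[fields.length - 1]);
                context.write(browser, ONE);
            }
        }
    }

    public static class UserBroserCountReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum the per-record counts for each browser.
            int sum = 0;
            for (IntWritable v : values) {
                sum += v.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }
}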