import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
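
/**
 * MapReduce job that counts crime records per district.
 *
 * The input is assumed to be a CSV file in which column 11 holds the
 * district and column 1 the case number (inferred from the indices used
 * in the mapper); adjust the indices if the dataset is laid out
 * differently.
 */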
public class Crime {
    // Must be public static: Hadoop instantiates the mapper via reflection,
    // so a private inner class would make the job fail at runtime.
    public static class CMapper extends Mapper<LongWritable, Text, Text, Text> {
        private final Text dis = new Text();
        private final Text cnumber = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split(",");
            // Skip malformed rows that have no district column.
            if (fields.length > 11) {
                dis.set(fields[11]);    // district
                cnumber.set(fields[1]); // case number
                context.write(dis, cnumber);
            }
        }
    }
    public static class CReduce extends Reducer<Text, Text, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // The counter is local to each reduce() call so it resets per
            // district instead of accumulating across keys.
            int sum = 0;
            for (Text t : values) {
                sum = sum + 1;
            }
            context.write(key, new IntWritable(sum));
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Job.getInstance already sets the job name, so a separate
        // setJobName call is unnecessary.
        Job job = Job.getInstance(conf, "crime1");
        job.setJarByClass(Crime.class);
        job.setMapperClass(CMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(CReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
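
// Example invocation (the jar name and paths below are placeholders,
// assuming the class is packaged into crime.jar):
//   hadoop jar crime.jar Crime /input/crimes.csv /output/crime-counts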