// NOTE(review): the curly typographic quotes “crime1” on the next two lines
// (likely introduced by a word-processor paste) are not valid Java string
// delimiters and will not compile — they must be straight double quotes ("crime1").
// Code left byte-identical here; fix before building.
Configuration conf=new Configuration();
// Build the MapReduce job; the name passed to getInstance is duplicated by
// the setJobName call below.
Job job=Job.getInstance(conf,“crime1”);
job.setJobName(“crime1”);
// Jar containing the mapper/reducer classes.
job.setJarByClass(Crime.class);
job.setMapperClass(CMapper.class);
// Map output types — NOTE(review): declared as (Text, Text) here, but the
// mapper visible below emits (Text, IntWritable); these declarations must
// match the mapper's actual output types or the job will fail at runtime.
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setReducerClass(CReduce.class);
// Final (reducer) output types: district -> count.
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
// args[0] = HDFS input path, args[1] = HDFS output path (must not exist yet).
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job,new Path(args[1]));
// Submit the job and block until it finishes, printing progress to the console.
job.waitForCompletion(true);
}
}
这里是按犯罪数量从小到大的顺序对警区进行排序。由此可以看出,犯罪数量最少的是警区1,犯罪数量最多的是警区934,据此可以判断警区1的治安比较好。
2.这里是按警区进行分组,然后统计每一组的逮捕数量,逮捕数量即为警察在案件发生后,成功抓捕犯罪人员的次数。逮捕数量高,说明这个警区的警察抓捕成功率更高,更容易抓到犯罪人员。
//这里的代码实现的是按地区District进行分组,然后统计逮捕数
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Crime {
private static class CMapper extends Mapper<LongWritable , Text, Text, IntWritable>{
Text dis=new Text();
//private final static IntWritable one = new IntWritable();
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException{
String line = value.toString();
String[] l = line.split(“,”);
dis.set(l[11]);
String arr=l[8];
String a =“FALSE”;
int b=0;
if (arr.equals(a)){
b=1;
}
else {
b=0;
}
if(b==0){
context.write(dis, new IntWritable(1));
}
}
}
public static class CReduce extends Reducer<Text, IntWritable, Text, IntWritable>{
public