package mapreduce;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import mapreduce.Pi; // needed below to generate the random points; this is the class from the earlier part of the post
/**
 * @author sakura
 * 2019.9.3
 * Estimates the value of π with MapReduce.
 */
public class CalPI {

    public static class PiMapper extends Mapper<Object, Text, Text, IntWritable> {

        // map() is called once per input line; the file this program reads has
        // ten lines, each containing 100000.
        int number = 0; // running total of points generated so far
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            int pointNum = Integer.parseInt(value.toString()); // parse this line's point count
            number = number + pointNum;                        // add it to the running total
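            // Note: Hadoop reuses one Mapper instance for every record in a split,
            // so number keeps growing across map() calls; each successive input
            // line therefore generates points for the running total so far, not
            // just for its own count.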
            int[] base = {2, 5};    // bases used by the quasi-random point generator
            Pi test = new Pi(base); // the point generator itself
            for (int x = 0; x < number; x++) { // generate the points in a loop
                double[] t = test.getNext();   // next point, stored as an (x, y) coordinate pair
                System.out.println(t[0] + "\t" + t[1]); // print the point's coordinates
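                // (On a real cluster this println lands in the map task's stdout
                // log rather than the client console.)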
                IntWritable result = new IntWritable(0); // the output value
                if ((t[0] * t[0] + t[1] * t[1]) <= 1) {  // is the point inside the quarter circle?
                    result = new IntWritable(1);         // if so, flip the output value to 1
                }
                value.set(String.valueOf(number)); // output key: the running total of points generated
                context.write(value, result);      // emit the key/value pair
            }
        }
    }

    public static class PiReducer extends Reducer<Text, IntWritable, Text, DoubleWritable> {

        private DoubleWritable result = new DoubleWritable(); // the output value
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            double pointNum = Double.parseDouble(key.toString()); // the input key, i.e. the total point count
            double sum = 0; // number of points that landed inside the quarter circle
            for (IntWritable val : values) { // accumulate the 0/1 values into sum
                sum += val.get();
            }
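            // The points fall in the unit square (area 1), and the quarter circle
            // inside it has area π/4, so π ≈ 4 × hits / total points.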
            result.set(sum / pointNum * 4); // store the computed estimate of π
            context.write(key, result);     // emit (total points, estimated π) as a pair
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "calculate pi");
        job.setJarByClass(CalPI.class);
        job.setMapperClass(PiMapper.class);
        job.setReducerClass(PiReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        Path in = new Path("hdfs://192.168.68.130:9000/user/hadoop/nai.txt");  // input file path
        Path out = new Path("hdfs://192.168.68.130:9000/user/hadoop/output4"); // output path; output4 must not already exist
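        // FileOutputFormat aborts job submission with a FileAlreadyExistsException
        // if the output directory exists, so remove output4 (e.g. with
        // hdfs dfs -rm -r) before re-running.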
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
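
The Pi class imported above is the generator covered in the earlier part of the post. For readers landing here directly, below is a minimal sketch of a compatible generator, assuming it produces points of a Halton quasi-random sequence (the same kind of low-discrepancy sequence used by Hadoop's built-in QuasiMonteCarlo example). Everything beyond the Pi(int[]) constructor and getNext(), which CalPI relies on, is an illustrative reconstruction rather than the author's actual code:

package mapreduce;

// Sketch of a Halton-sequence point generator matching the interface CalPI uses.
public class Pi {
    private final int[] base; // one base per coordinate, e.g. {2, 5}
    private long index = 0;   // index of the last point handed out

    public Pi(int[] base) {
        this.base = base;
    }

    // Radical inverse of n in base b: reverse n's base-b digits and place
    // them after the radix point, giving a value in [0, 1).
    private static double radicalInverse(long n, int b) {
        double inv = 0.0;
        double f = 1.0 / b;
        while (n > 0) {
            inv += f * (n % b);
            n /= b;
            f /= b;
        }
        return inv;
    }

    // Returns the next point of the sequence, one coordinate per base.
    public double[] getNext() {
        index++;
        double[] p = new double[base.length];
        for (int i = 0; i < base.length; i++) {
            p[i] = radicalInverse(index, base[i]);
        }
        return p;
    }
}

Because Halton points cover the unit square evenly, the hit ratio tends to converge to π/4 faster than with a plain pseudo-random generator, so the reducer's estimates approach 3.14 as the point totals in the keys grow.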