// MapperClass.java
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.LineReader;
// Coordinate representation of a single data point.
class DmRecord {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
private double xpodouble; //X坐标
private double ypodouble; //Y坐标
//构造函数
public DmRecord()
{
}
public DmRecord(String name,double x,double y)
{
this.name = name;
this.xpodouble = x;
this.ypodouble = y;
}
public double getXpoint()
{
return xpodouble;
}
public void setXpoint(double xpodouble)
{
this.xpodouble = xpodouble;
}
public double getYpoint()
{
return ypodouble;
}
public void setYpoint(double ypodouble) {
this.ypodouble = ypodouble;
}
//计算俩点之间距离
public double distance(DmRecord record)
{
return Math.sqrt(Math.pow(this.xpodouble-record.xpodouble, 2)+Math.pow(this.ypodouble-record.ypodouble, 2));
}
}
// Helper class: loads and parses the cluster-centre records.
class DmRecordParser
{
private Map<String,DmRecord> urlMap = new HashMap<String,DmRecord>();
// Reads the cluster-centre file and initialises the centre points.
/**
 * Reads the cluster-centre file at {@code file} (an HDFS/local URI) and
 * populates {@code urlMap}, keyed by the first whitespace-separated token
 * of each line, with the {@link DmRecord} parsed from that line.
 *
 * Fix vs. original: the input stream was never closed (resource leak on
 * both the normal and the exceptional path); it is now managed with
 * try-with-resources. A stray double semicolon was also removed.
 *
 * @param file URI of the centre-point file
 * @throws IOException if the file system cannot be reached or read
 */
public void initialize(String file) throws IOException
{
    Configuration config = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(file), config);
    // try-with-resources guarantees the stream is closed even if a read throws.
    try (FSDataInputStream indic = fs.open(new Path(file))) {
        String t = indic.readLine();
        while (t != null)
        {
            // Hadoop separates key and value with a tab; normalise it to a single
            // space before splitting. Note split(" ") yields empty tokens for
            // consecutive spaces, so the input must use exactly one separator.
            String[] strKey = t.replace("\t", " ").split(" ");
            urlMap.put(strKey[0], parse(t));
            t = indic.readLine();
        }
    }
}
// Reads the centre-point coordinates from the "new"/"old" centre files.
public static List<ArrayList<Double>> getCenters(String inputpath){