import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.conf.Configuration
import java.net.URI
import org.apache.hadoop.fs.FSDataInputStream;
import java.io.InputStreamReader;
import java.io.BufferedReader;
case object MetisPartition extends PartitionStrategy {
  // Maps vertex id (1-based line number in the METIS output file) -> partition id.
  val metisMap = new scala.collection.mutable.HashMap[Int, Int]

  /**
   * Loads the METIS partition assignment file from HDFS into `metisMap`.
   *
   * Each non-empty line N of the file holds the partition id assigned to
   * vertex N (ids are 1-based). The file is read directly through the HDFS
   * `FileSystem` API because no `SparkContext` reference is available here
   * (only one SparkContext may exist per application, so `sc.textFile`
   * cannot be used from this object).
   *
   * Errors (missing file, malformed line) are logged and swallowed, leaving
   * `metisMap` partially populated.
   */
  def loadMetisFile() {
    val metisPath = "hdfs://192.168.0.100:9000/test/Web_metis_Final_Input.txt.part.6"
    var bReader: BufferedReader = null
    try {
      val hdfs = FileSystem.get(URI.create(metisPath), new Configuration)
      val fp: FSDataInputStream = hdfs.open(new Path(metisPath))
      bReader = new BufferedReader(new InputStreamReader(fp))
      var id = 1
      var line: String = bReader.readLine()
      while (line != null) {
        val trimmed = line.trim // tolerate surrounding whitespace before toInt
        if (!trimmed.isEmpty) {
          metisMap.put(id, trimmed.toInt)
          id = id + 1
        }
        // BUG FIX: the original loop never advanced to the next line,
        // so it spun forever on the first line of any non-empty file.
        line = bReader.readLine()
      }
      println("metisMap size: " + metisMap.size)
      // metisMap.foreach{case (e,i) => println(e,i)}
    } catch {
      case ex: Exception => // Handle missing file / malformed input
        ex.printStackTrace()
    } finally {
      // Closing the BufferedReader also closes the wrapped InputStreamReader
      // and the underlying HDFS stream; done in finally so nothing leaks on error.
      if (bReader != null) bReader.close()
    }
  }

  /**
   * Returns the METIS-assigned partition for the edge's source vertex.
   *
   * @throws IllegalArgumentException if the source vertex has no entry in
   *         `metisMap` (i.e. `loadMetisFile()` was not called or the vertex
   *         is absent from the partition file).
   */
  override def getPartition(src: VertexId, dst: VertexId, numParts: PartitionID): PartitionID = {
    metisMap.getOrElse(
      src.hashCode(),
      throw new IllegalArgumentException("Metis can't find partition!"))
  }
}