如果测试读完存到本地文件的话,直接用fs.copyToLocalFile()方法即可,但是如果测试读到缓存的性能,则需要用到FSDataInputStream
上代码:
/**
* @ProjectName: Hadoop预研平台
*/
package com.hikvision.hdfs.test.performance;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
/**
 * <p>Measures the throughput of sequentially reading an HDFS file into a local
 * in-memory buffer (read-to-cache performance; reading to a local file would
 * instead use {@code fs.copyToLocalFile()}).</p>
 * @author 2013-1-10 5:05:36 PM
 * @version V1.0
 * @modificationHistory === record of major logical/functional changes ===
 */
public class TestReadPerformence {
    /**
     * Reads the given HDFS file to EOF and prints the total byte count, the
     * elapsed time in milliseconds, and the throughput in MB/s.
     *
     * @author wanglongyf2 2013-1-10 5:05:37 PM
     * @param args args[0]: HDFS file path (optional); args[1]: read-buffer size
     *             in MB (optional, default 1)
     * @throws IOException if the filesystem cannot be reached or the file
     *                     cannot be opened or read
     */
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1,node2,node3");
        // "fs.default.name" is the pre-Hadoop-2.x key; "fs.defaultFS" is its
        // modern replacement (kept as-is to match the target cluster version).
        conf.set(/*"fs.defaultFS"*/"fs.default.name","hdfs://node1");
        FileSystem hdfs = FileSystem.get(conf);

        // Default test file (~1 GB HBase store file) used when no path is given.
        String fileName = "/hbase/testscan/5f525e5fc37da7f1fc0c42b4a5a3be0b/f1/2820765239803238635";//1g
        if (args.length >= 1) {
            fileName = args[0];
        }
        int mNumber = 1;
        if (args.length >= 2) { // fix: original "== 2" ignored args[1] when extra args were passed
            mNumber = Integer.parseInt(args[1]); // parseInt avoids needless boxing
            if (mNumber <= 0) {
                mNumber = 1; // guard against NegativeArraySizeException below
            }
        }

        FSDataInputStream dis = hdfs.open(new Path(fileName));
        long total = 0;
        long start = System.currentTimeMillis();
        try {
            byte[] buffer = new byte[mNumber * 1024 * 1024];
            int read;
            // fix: read(byte[]) may legally return 0 without EOF; only -1 means
            // end of stream, so loop until -1 instead of "read > 0".
            while ((read = dis.read(buffer)) != -1) {
                total += read;
            }
        } finally {
            dis.close(); // fix: original leaked the input stream
        }
        long stop = System.currentTimeMillis();
        long use = stop - start;
        // fix: guard against division by zero (Infinity/NaN) on very fast reads.
        double rate = use == 0 ? 0.0 : 1000.0 * total / (use * 1024 * 1024);
        System.out.println("total: " + total +". use: " + use + ". rate:M/s:" + rate );
    }
}