HDFS在生产应用中主要是客户端的开发,其核心步骤是从HDFS提供的API中构造一个HDFS的访问客户端对象,然后通过该客户端对象操作HDFS上的文件(如增删改查)。案例如下:
package lyl.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
public class HadoopFileSystem {
/**
 * Entry point: delegates to {@code read()}, which copies an HDFS file's
 * contents to the console.
 *
 * @param cliArgs command-line arguments (unused)
 * @throws IOException if the HDFS read fails
 */
public static void main(String[] cliArgs) throws IOException {
    read();
}
/**
* 从HDFS上读取文件输出到控制台
* @throws IOException
*/
public static void read() throws IOException {
Configuration configuration = new Configuration();
FileSystem fileSystem = FileSystem.get(configuration);
Path path = new Path("/user/flume/hiveLogs/FlumeData.1517279361739");
FSDataInputStream inputStream = file