hdfs的实验总结_HDFS文件读取实验

'''

// package org.hadoop.sort;

import java.io.BufferedReader;

import java.io.InputStreamReader;

import java.lang.Object;

import java.nio.charset.StandardCharsets;

import java.util.List;

//import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.fs.FSDataInputStream;

import org.apache.hadoop.fs.FSDataOutputStream;

import org.apache.hadoop.fs.FileSystem;

import org.apache.hadoop.fs.Path;

public class HDFSfileifexist {

public static void myWrite(String content, String name){

try

{

Configuration conf=new Configuration();

conf.set("fs.defaultFS", "hdfs://localhost:9000");

conf.set("fs.hdfs.omp", "org.apache.hadoop.hdfs.DistributedFileSystem");

FileSystem fs=FileSystem.get(conf);

byte[] buff=content.getBytes();

String filename=name;

FSDataOutputStream os=fs.create(new Path(filename));

os.write(buff,0,buff.length);

System.out.println("Creat:"+filename);

os.close();

fs.close();

}

catch(Exception e)

{

e.printStackTrace();

}

}

public static String myRead(String fileName){

String content = null;

try

{

Configuration conf=new Configuration();

conf.set("fs.defaultFS", "hdfs://localhost:9000");

conf.set("fs.hdfs.omp", "org.apache.hadoop.hdfs.DistributedFileSystem");

FileSystem fs=FileSystem.get(conf);

Path file=new Path(fileName);

FSDataInputStream getIt=fs.open(file);

BufferedReader d=new BufferedReader(new InputStreamReader(getIt));

content=d.readLine();

System.out.println(""+content);

d.close();

fs.close();

}

catch(Exception e)

{

e.printStackTrace();

}

return content;

}

public static String merge(String s1){

String a = s1;

String [] s = a.split(" ");

String b = "";

s[0].compareTo(s[1]);

int temp;

int i,j;

for(i=0;i

{

for(j=0;j

{

if(Integer.parseInt(s[j])>Integer.parseInt(s[j+1]))

{

String temp1=s[j];

s[j]=s[j+1];

s[j+1]=temp1;

}

}

}

//Arrays.sort(s);

//Connections.sort();

System.out.println("a:"+a);

for(int i1 = 0; i1 < s.length; i1++){

b += s[i1]+" ";

}

System.out.println(b);

return b;

}

public static void main(String[] args) {

String s1 = myRead("hdfs://localhost:9000/input/1_3.txt");

//String s2 = myRead("hdfs://localhost:9000/input/1_2.txt");

String s3 = merge(s1);

myWrite(s3,"hdfs://localhost:9000/input/result.txt");

}

}

'''

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值