Writing an HDFS Client Program in Java

1. By default, Hadoop stores its temporary data under the Unix /tmp directory (cd /tmp shows files such as hadoop-root). Because /tmp may be cleared on reboot, Hadoop can misbehave after the Linux system restarts if this is left unchanged, so the Hadoop temporary file directory should be moved elsewhere.

2. Edit core-site.xml as follows, then restart the Hadoop cluster; do not reformat the NameNode:

    <property>
        <name>hadoop.tmp.dir</name>
        <value>/var/hadoop</value>
    </property>

Formatting the NameNode regenerates its clusterID while each DataNode keeps its old clusterID value, and the mismatch makes the DataNodes fail to start. If that has already happened, edit the VERSION file under each DataNode's /var/hadoop/dfs/data/current directory so that its clusterID matches the NameNode's, then start the cluster again; it should come up normally.
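Since VERSION is a plain key=value file, the clusterID fix can also be scripted rather than edited by hand. A minimal sketch, assuming the data directory used in this post and a placeholder target ID (run it on each DataNode):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.List;
    import java.util.stream.Collectors;

    public class FixClusterId {
        public static void main(String[] args) throws IOException {
            // Path follows the data directory used in this post; adjust if yours differs.
            Path version = Paths.get("/var/hadoop/dfs/data/current/VERSION");
            // Placeholder: copy the real value from the NameNode's VERSION file.
            String namenodeClusterId = "CID-xxxxxxxx";
            // Rewrite only the clusterID line, leaving everything else untouched.
            List<String> fixed = Files.readAllLines(version).stream()
                    .map(line -> line.startsWith("clusterID=") ? "clusterID=" + namenodeClusterId : line)
                    .collect(Collectors.toList());
            Files.write(version, fixed);
        }
    }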

3. For testing, you can disable permission checking (otherwise the client will be denied access). Add the following to hdfs-site.xml on the NameNode:

    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
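As an alternative to disabling permission checking, the client can identify itself as the user that owns the HDFS paths. A minimal sketch, assuming the NameNode address used later in this post and a hypothetical user name:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ConnectAsUser {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // The third argument sets the remote user for this connection ("root" is an assumption).
            FileSystem fs = FileSystem.get(new URI("hdfs://172.26.19.40:9000"), conf, "root");
            System.out.println("exists: " + fs.exists(new Path("/skcc")));
        }
    }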

The project's Maven pom.xml:

    <?xml version="1.0" encoding="UTF-8"?>
    <project xmlns="http://maven.apache.org/POM/4.0.0"
             xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
             xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
        <modelVersion>4.0.0</modelVersion>
        <groupId>com.skcc</groupId>
        <artifactId>wordcount</artifactId>
        <version>0.0.1-SNAPSHOT</version>
        <name>wordcount</name>
        <description>count the word</description>

        <properties>
            <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
            <hadoop.version>2.7.3</hadoop.version>
        </properties>

        <dependencies>
            <dependency>
                <groupId>junit</groupId>
                <artifactId>junit</artifactId>
                <version>4.12</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-client</artifactId>
                <version>${hadoop.version}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-common</artifactId>
                <version>${hadoop.version}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-hdfs</artifactId>
                <version>${hadoop.version}</version>
            </dependency>
        </dependencies>
    </project>
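With these dependencies declared, mvn package builds the client. Note that in Hadoop 2.x, hadoop-client should already pull in hadoop-common and hadoop-hdfs transitively, so pinning all three as above is defensive rather than strictly required.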

The HDFS client class:

    package com.skcc.hadoop;

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;
    import java.text.NumberFormat;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    public class HelloHDFS {

        public static FileSystem getFileSystemInstance() {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://172.26.19.40:9000");
            FileSystem fileSystem = null;
            try {
                fileSystem = FileSystem.get(conf);
            } catch (IOException e) {
                e.printStackTrace();
            }
            return fileSystem;
        }

        public static void getFileFromHDFS() throws Exception {
            // URL handles the http protocol by default; FsUrlStreamHandlerFactory
            // adds support for the hdfs protocol.
            URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
            URL url = new URL("hdfs://172.26.19.40:9000/10803060234.txt");
            InputStream inputStream = url.openStream();
            IOUtils.copyBytes(inputStream, System.out, 4096, true);
        }

        public static void getFileFromBaiDu() throws IOException {
            // Fetch an ordinary http URL the same way, for comparison.
            URL url = new URL("http://skynet.skhynix-cq.com.cn/plusWare/Main.aspx");
            InputStream inputStream = url.openStream();
            IOUtils.copyBytes(inputStream, System.out, 4096, true);
        }

        public static void testHadoop() throws Exception {
            FileSystem fileSystem = getFileSystemInstance();

            boolean success = fileSystem.mkdirs(new Path("/skcc"));
            System.out.println("mkdirs is " + success);

            success = fileSystem.exists(new Path("/10803060234.txt"));
            System.out.println("file exists is " + success);

            success = fileSystem.delete(new Path("/test2.data"), true);
            System.out.println("delete dirs is " + success);

            success = fileSystem.exists(new Path("/skcc"));
            System.out.println("dirs exists is " + success);
        }

        public static void uploadFileToHDFS() throws Exception {
            FileSystem fileSystem = getFileSystemInstance();
            String filename = "/test2.data";
            // Second argument: overwrite == true.
            FSDataOutputStream outputStream = fileSystem.create(new Path(filename), true);
            FileInputStream fis = new FileInputStream("D:\\2018\\u001.zip");
            // One-liner alternative: IOUtils.copyBytes(fis, outputStream, 4096, true);

            long totalLen = fis.getChannel().size();
            long tmpSize = 0;
            NumberFormat numberFormat = NumberFormat.getInstance();
            numberFormat.setMaximumFractionDigits(0);
            System.out.println("totalLen : " + totalLen + " available : " + fis.available());

            // Copy in 4 KB chunks, printing upload progress as a percentage.
            byte[] buf = new byte[4096];
            int len = fis.read(buf);
            while (len != -1) {
                tmpSize = tmpSize + len;
                String result = numberFormat.format((float) tmpSize / (float) totalLen * 100);
                outputStream.write(buf, 0, len);
                System.out.println("Upload Percent : " + result + "%");
                len = fis.read(buf);
            }
            // Close both streams so all buffered bytes are flushed to HDFS.
            fis.close();
            outputStream.close();
        }
    }
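For reference, a minimal driver that exercises the methods above (hypothetical, not part of the original post; note that getFileFromHDFS registers a process-wide URL stream handler factory, which can be set only once per JVM):

    package com.skcc.hadoop;

    public class HelloHDFSDriver {
        public static void main(String[] args) throws Exception {
            HelloHDFS.testHadoop();       // mkdir / exists / delete round trip
            HelloHDFS.uploadFileToHDFS(); // upload the local zip with progress output
            HelloHDFS.getFileFromHDFS();  // stream /10803060234.txt to stdout via an hdfs:// URL
        }
    }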
