Hadoop Daily Learning: Bug Debugging (Part 1)

(1) Downloading a file (method 1):

The code is as follows:

package hdfs;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.FileOutputStream;
import java.io.IOException;

public class HdfsUtil
{
    public static void main(String[] args) throws IOException
    {
        // Download a file from HDFS to the local disk
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = fs.open(new Path("hdfs://192.168.1.108:9000/hadoop-2.10.0.tar.gz"));
        FileOutputStream out = new FileOutputStream("F:\\大数据学习\\8天学完hadoop\\weekend110-第2天\\hadoop-2.10.0.tar.gz");
        IOUtils.copy(in, out);
    }
}

The error:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.util.Shell).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Exception in thread "main" java.lang.IllegalArgumentException: Wrong FS: hdfs://192.168.1.108:9000/hadoop-2.10.0.tar.gz, expected: file:///
    at org.apache.hadoop.fs.FileSystem.checkPath(FileSystem.java:773)
    at org.apache.hadoop.fs.RawLocalFileSystem.pathToFile(RawLocalFileSystem.java:86)
    at org.apache.hadoop.fs.RawLocalFileSystem.deprecatedGetFileStatus(RawLocalFileSystem.java:626)
    at org.apache.hadoop.fs.RawLocalFileSystem.getFileLinkStatusInternal(RawLocalFileSystem.java:857)
    at org.apache.hadoop.fs.RawLocalFileSystem.getFileStatus(RawLocalFileSystem.java:621)
    at org.apache.hadoop.fs.FilterFileSystem.getFileStatus(FilterFileSystem.java:446)
    at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSInputChecker.<init>(ChecksumFileSystem.java:146)
    at org.apache.hadoop.fs.ChecksumFileSystem.open(ChecksumFileSystem.java:347)
    at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:914)
    at hdfs.HdfsUtil.main(HdfsUtil.java:19)

Setting the fs.defaultFS parameter on the conf object resolves the problem:

conf.set("fs.defaultFS","hdfs://192.168.1.108:9000/");
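
For reference, here is a minimal sketch of the download with that fix applied (same imports and paths as above; closing the streams at the end is my addition, not part of the original):

public class HdfsUtil
{
    public static void main(String[] args) throws IOException
    {
        // Download a file from HDFS to the local disk.
        Configuration conf = new Configuration();
        // Point the client at the remote NameNode instead of the default local file system.
        conf.set("fs.defaultFS", "hdfs://192.168.1.108:9000/");
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = fs.open(new Path("hdfs://192.168.1.108:9000/hadoop-2.10.0.tar.gz"));
        FileOutputStream out = new FileOutputStream("F:\\大数据学习\\8天学完hadoop\\weekend110-第2天\\hadoop-2.10.0.tar.gz");
        IOUtils.copy(in, out);
        // Close the streams once the copy is done (my addition).
        out.close();
        in.close();
    }
}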

(2) Uploading a file

The code is as follows:

public static void upLoad() throws IOException
    {
        // Upload a local file to HDFS.
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://192.168.1.108:9000/");
        FileSystem fileSystem = FileSystem.get(configuration);
        // Create the target file on HDFS, then stream the local file into it.
        FSDataOutputStream fsDataOutputStreamfile = fileSystem.create(new Path("hdfs://192.168.1.108:9000/分布式文件系统.txt"));
        FileInputStream fileInputStream = new FileInputStream("F:\\大数据学习\\8天学完hadoop\\weekend110-第2天\\doc分布式文件系统.txt");
        IOUtils.copy(fileInputStream, fsDataOutputStreamfile);
    }

The error:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.util.Shell).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Exception in thread "main" org.apache.hadoop.security.AccessControlException: Permission denied: user=Administrator, access=WRITE, inode="/":xuliang:supergroup:drwxr-xr-x
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:350)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:251)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:189)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1756)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1740)
	at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1699)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.resolvePathForStartFile(FSDirWriteFileOp.java:293)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2308)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2252)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:779)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:420)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:507)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1034)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:994)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:922)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1893)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2833)

Cause: a permission problem. Fix: tell the client which user it should act as on the Hadoop cluster, e.g. by adding the JVM option -DHADOOP_USER_NAME=xuliang to the run configuration; with that the problem is solved.
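
A few equivalent ways to supply that user from the client side (xuliang is the owner of "/" shown in the stack trace; whichever option you use, it has to take effect before the first FileSystem.get call):

// Option 1: JVM option on the run configuration, as above.
//   -DHADOOP_USER_NAME=xuliang
// Option 2: set the same property in code before any FileSystem call.
System.setProperty("HADOOP_USER_NAME", "xuliang");
// Option 3: pass the user explicitly when obtaining the FileSystem (this is what method 2 below does).
FileSystem fs = FileSystem.get(new URI("hdfs://192.168.1.108:9000/"), configuration, "xuliang");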

(3) Downloading a file (method 2, the concise way):

    private FileSystem fileSystem = null;

    @Before
    public void init() throws IOException, URISyntaxException, InterruptedException
    {
        // Connect to the remote HDFS as user "xuliang" so writes pass the permission check.
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://192.168.1.108:9000/");
        fileSystem = FileSystem.get(new URI("hdfs://192.168.1.108:9000/"), configuration, "xuliang");
    }

    @Test
    public void downLoadSimple() throws IOException
    {
        // Copy the HDFS file to the given local path in a single call.
        fileSystem.copyToLocalFile(new Path("hdfs://192.168.1.108:9000/分布式文件系统1.txt"), new Path("F:\\大数据学习\\8天学完hadoop\\weekend110-第2天\\hadoop-2.10.0.tar.gz"));
    }

The error:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.

java.lang.RuntimeException: java.io.FileNotFoundException: java.io.FileNotFoundException: HADOOP_HOME and hadoop.home.dir are unset. -see https://wiki.apache.org/hadoop/WindowsProblems

	at org.apache.hadoop.util.Shell.getWinUtilsPath(Shell.java:722)
	at org.apache.hadoop.util.Shell.getSetPermissionCommand(Shell.java:256)
	at org.apache.hadoop.util.Shell.getSetPermissionCommand(Shell.java:273)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:767)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:235)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:219)
	at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:314)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:302)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:334)
	at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:399)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:462)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:441)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:1067)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:1048)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:937)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:391)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:364)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:314)
	at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2375)
	at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2344)
	at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2320)
	at hdfs.HdfsUtil.downLoadSimple(HdfsUtil.java:65)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:271)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:70)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:50)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:160)
	at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:68)
	at com.intellij.rt.execution.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:47)
	at com.intellij.rt.execution.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:242)
	at com.intellij.rt.execution.junit.JUnitStarter.main(JUnitStarter.java:70)

Cause of the error: a Hadoop environment variable needs to be set on the local development machine (my HDFS is deployed on a remote server, so my Windows development machine never had it set). Reading the source code confirms this:

The source code is shown below. (From the source, this environment variable is needed so that Hadoop can use its own commands to create the downloaded file on the local machine, which feels a bit clunky; couldn't it just produce the output stream with a FileOutputStream as in method 1? With my limited knowledge, I can't figure it out.)

private static File checkHadoopHome() throws FileNotFoundException {
        String home = System.getProperty("hadoop.home.dir");
        if (home == null) {
            home = System.getenv("HADOOP_HOME");
        }

        return checkHadoopHomeInner(home);
    }

    @VisibleForTesting
    static File checkHadoopHomeInner(String home) throws FileNotFoundException {
        if (home == null) {
            throw new FileNotFoundException("HADOOP_HOME and hadoop.home.dir are unset.");
        } else {
            while(home.startsWith("\"")) {
                home = home.substring(1);
            }

            while(home.endsWith("\"")) {
                home = home.substring(0, home.length() - 1);
            }

            if (home.isEmpty()) {
                throw new FileNotFoundException("HADOOP_HOME or hadoop.home.dir set to an empty string");
            } else {
                File homedir = new File(home);
                if (!homedir.isAbsolute()) {
                    throw new FileNotFoundException("Hadoop home directory " + homedir + " " + "is not an absolute path.");
                } else if (!homedir.exists()) {
                    throw new FileNotFoundException("Hadoop home directory " + homedir + " " + "does not exist");
                } else if (!homedir.isDirectory()) {
                    throw new FileNotFoundException("Hadoop home directory " + homedir + " " + "is not a directory.");
                } else {
                    return homedir;
                }
            }
        }
    }

Solution: set it in the init() method:

System.setProperty("hadoop.home.dir", "H:\\installPkg\\hadoop-2.10.0\\hadoop-2.10.0");

Or set the HADOOP_HOME environment variable on the machine directly.
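
Putting the fixes together, the init() used for method 2 might look like the sketch below (the local Hadoop path is the one from above; treat it as an example, not the only valid layout):

    @Before
    public void init() throws IOException, URISyntaxException, InterruptedException
    {
        // Tell Hadoop where the local unpacked distribution lives so winutils.exe can be located (Windows only).
        System.setProperty("hadoop.home.dir", "H:\\installPkg\\hadoop-2.10.0\\hadoop-2.10.0");
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://192.168.1.108:9000/");
        // Connect as user xuliang to avoid the permission error from section (2).
        fileSystem = FileSystem.get(new URI("hdfs://192.168.1.108:9000/"), configuration, "xuliang");
    }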

After setting it, there is still an error:

java.lang.RuntimeException: java.io.FileNotFoundException: Could not locate Hadoop executable: H:\installPkg\hadoop-2.10.0\hadoop-2.10.0\bin\winutils.exe -see https://wiki.apache.org/hadoop/WindowsProblems

	at org.apache.hadoop.util.Shell.getWinUtilsPath(Shell.java:722)
	at org.apache.hadoop.util.Shell.getSetPermissionCommand(Shell.java:256)
	at org.apache.hadoop.util.Shell.getSetPermissionCommand(Shell.java:273)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:767)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:235)
	at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.<init>(RawLocalFileSystem.java:219)
	at org.apache.hadoop.fs.RawLocalFileSystem.createOutputStreamWithMode(RawLocalFileSystem.java:314)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:302)
	at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:334)
	at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:399)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:462)
	at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:441)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:1067)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:1048)
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:937)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:391)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:364)
	at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:314)
	at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2375)
	at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2344)
	at org.apache.hadoop.fs.FileSystem.copyToLocalFile(FileSystem.java:2320)
	at hdfs.HdfsUtil.downLoadSimple(HdfsUtil.java:65)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:271)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:70)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:50)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:160)
	at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:68)
	at com.intellij.rt.execution.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:47)
	at com.intellij.rt.execution.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:242)
	at com.intellij.rt.execution.junit.JUnitStarter.main(JUnitStarter.java:70)

As noted above, HDFS has its own way of creating local files; if this file is missing from the bin directory, just put it there and the problem goes away.

Where to get winutils.exe: winutils.rar (a CSDN download resource).
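
Incidentally, as speculated above, if you would rather not depend on winutils.exe at all, the stream-based approach from method 1 should also work for downloads, because the local file is written with a plain FileOutputStream instead of going through Hadoop's local file system. A sketch (the method name and local target path are illustrative; fileSystem is the field initialized in init()):

    @Test
    public void downLoadStream() throws IOException
    {
        // Read from HDFS and write the local copy with an ordinary FileOutputStream, as in method 1.
        FSDataInputStream in = fileSystem.open(new Path("hdfs://192.168.1.108:9000/分布式文件系统1.txt"));
        FileOutputStream out = new FileOutputStream("F:\\大数据学习\\8天学完hadoop\\weekend110-第2天\\分布式文件系统1.txt");
        IOUtils.copy(in, out);
        // Close the streams once the copy is done.
        out.close();
        in.close();
    }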

