Common Hive Errors

1. "could only be replicated to 0 nodes" on job submission: resolved by deleting the data storage directories and reformatting Hadoop. A recovery sketch follows, then the full error.
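A minimal sketch of that recovery, assuming the default directories under hadoop.tmp.dir; the paths below are assumptions, so verify dfs.namenode.name.dir and dfs.datanode.data.dir in hdfs-site.xml before deleting anything. Reformatting wipes all HDFS data, so this is only reasonable on a test cluster.

```sh
# Stop HDFS before touching on-disk state.
stop-dfs.sh

# Assumed default locations (hadoop.tmp.dir = /tmp/hadoop-<user>); verify
# against dfs.namenode.name.dir / dfs.datanode.data.dir in hdfs-site.xml.
rm -rf /tmp/hadoop-*/dfs/name/*   # NameNode metadata (on the NameNode)
rm -rf /tmp/hadoop-*/dfs/data/*   # block storage (on every DataNode)

# Reformat so the NameNode and DataNodes share a fresh clusterID, then restart.
hdfs namenode -format
start-dfs.sh

# Confirm the DataNodes registered and are no longer excluded.
hdfs dfsadmin -report
```

Deleting the DataNode directories alongside the NameNode metadata keeps the clusterIDs consistent after the reformat; a DataNode left with an old clusterID will refuse to rejoin the cluster.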

java.lang.RuntimeException: Error caching map.xml: org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/hive/hadoop/97c28744-9396-409d-a714-ad71a548ec63/hive_2017-03-01_01-53-54_684_594441272444452517-1/-mr-10004/50488ee5-23e1-4d27-a707-ee0f32055a9b/map.xml could only be replicated to 0 nodes instead of minReplication (=1).  There are 2 datanode(s) running and 2 node(s) are excluded in this operation.

         at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1610)

         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3315)

         at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:679)

         at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.addBlock(AuthorizationProviderProxyClientProtocol.java:214)

         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:489)

         at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)

         at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)

         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)

         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)

         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)

         at java.security.AccessController.doPrivileged(Native Method)

         at javax.security.auth.Subject.doAs(Subject.java:415)

         at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)

         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)

 

         at org.apache.hadoop.hive.ql.exec.Utilities.setBaseWork(Utilities.java:737)

         at org.apache.hadoop.hive.ql.exec.Utilities.setMapWork(Utilities.java:672)

         at org.apache.hadoop.hive.ql.exec.Utilities.setMapRedWork(Utilities.java:664)

         at org.apache.hadoop.hive.ql.exec.mr.ExecDriver.execute(ExecDriver.java:374)

         at org.apache.hadoop.hive.ql.exec.mr.MapRedTask.execute(MapRedTask.java:137)

         at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:160)

         at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:100)

         at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:1782)

         at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:1539)

         at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1318)

         at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1127)

         at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1115)

         at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:220)

         at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:172)

         at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:383)

         at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:775)

         at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:693)

         at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:628)

         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)

         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)

         at java.lang.reflect.Method.invoke(Method.java:606)

         at org.apache.hadoop.util.RunJar.run(RunJar.java:221)

         at org.apache.hadoop.util.RunJar.main(RunJar.java:136)

Caused by: org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/hive/hadoop/97c28744-9396-409d-a714-ad71a548ec63/hive_2017-03-01_01-53-54_684_594441272444452517-1/-mr-10004/50488ee5-23e1-4d27-a707-ee0f32055a9b/map.xml could only be replicated to 0 nodes instead of minReplication (=1).  There are 2 datanode(s) running and 2 node(s) are excluded in this operation.

         at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1610)

         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3315)

         at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:679)

         at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.addBlock(AuthorizationProviderProxyClientProtocol.java:214)

         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:489)

         at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)

         at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)

         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)

         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)

         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)

         at java.security.AccessController.doPrivileged(Native Method)

         at javax.security.auth.Subject.doAs(Subject.java:415)

         at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)

         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)

 

         at org.apache.hadoop.ipc.Client.call(Client.java:1471)

         at org.apache.hadoop.ipc.Client.call(Client.java:1408)

         at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)

         at com.sun.proxy.$Proxy14.addBlock(Unknown Source)

         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:409)

         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)

         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)

         at java.lang.reflect.Method.invoke(Method.java:606)

         at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:256)

         at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:104)

         at com.sun.proxy.$Proxy15.addBlock(Unknown Source)

         at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.locateFollowingBlock(DFSOutputStream.java:1733)

         at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.nextBlockOutputStream(DFSOutputStream.java:1529)

         at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:683)

Job Submission failed with exception 'java.lang.RuntimeException(Error caching map.xml: org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/hive/hadoop/97c28744-9396-409d-a714-ad71a548ec63/hive_2017-03-01_01-53-54_684_594441272444452517-1/-mr-10004/50488ee5-23e1-4d27-a707-ee0f32055a9b/map.xml could only be replicated to 0 nodes instead of minReplication (=1).  There are 2 datanode(s) running and 2 node(s) are excluded in this operation.

         at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1610)

         at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3315)

         at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:679)

         at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.addBlock(AuthorizationProviderProxyClientProtocol.java:214)

         at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:489)

         at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)

         at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)

         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)

         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)

         at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)

         at java.security.AccessController.doPrivileged(Native Method)

         at javax.security.auth.Subject.doAs(Subject.java:415)

         at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)

         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)

)'

 

FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask


2. Caused by insufficient disk space on the Hadoop cluster. A quick capacity check is sketched after the trace below.

2017-03-15 18:03:07,202 DEBUG [main]: metastore.HiveMetaStore(HiveMetaStore.java:createDefaultRoles_core(734)) - Failed while granting global privs to admin

InvalidObjectException(message:All is already granted by admin)

      at org.apache.hadoop.hive.metastore.ObjectStore.grantPrivileges(ObjectStore.java:4186)

      at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

      at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)

      at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)

      at java.lang.reflect.Method.invoke(Method.java:606)

      at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:114)

      at com.sun.proxy.$Proxy5.grantPrivileges(Unknown Source)

      at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultRoles_core(HiveMetaStore.java:731)

      at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultRoles(HiveMetaStore.java:697)

      at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:482)

      at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)

      at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:84)

      at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:5923)

      at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.<init>(HiveMetaStoreClient.java:201)

      at org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient.<init>(SessionHiveMetaStoreClient.java:74)

      at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)

      at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)

      at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)

      at java.lang.reflect.Constructor.newInstance(Constructor.java:526)

      at org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1501)

      at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.<init>(RetryingMetaStoreClient.java:67)

      at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:82)

      at org.apache.hadoop.hive.ql.metadata.Hive.createMetaStoreClient(Hive.java:3024)

      at org.apache.hadoop.hive.ql.metadata.Hive.getMSC(Hive.java:3043)

      at org.apache.hadoop.hive.ql.metadata.Hive.getAllFunctions(Hive.java:3268)

      at org.apache.hadoop.hive.ql.metadata.Hive.reloadFunctions(Hive.java:215)

      at org.apache.hadoop.hive.ql.metadata.Hive.registerAllFunctionsOnce(Hive.java:201)

      at org.apache.hadoop.hive.ql.metadata.Hive.<init>(Hive.java:312)

      at org.apache.hadoop.hive.ql.metadata.Hive.get(Hive.java:273)

      at org.apache.hadoop.hive.ql.metadata.Hive.get(Hive.java:248)

      at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:513)

      at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:689)

      at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:628)

      at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

      at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)

      at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)

      at java.lang.reflect.Method.invoke(Method.java:606)

      at org.apache.hadoop.util.RunJar.run(RunJar.java:221)

      at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
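Before freeing or deleting anything, it is worth confirming the diagnosis. A short check, assuming only a working HDFS client on the node:

```sh
# Local filesystem usage on each node; a full /tmp or data volume is the
# usual culprit.
df -h

# Cluster-level view: configured capacity, DFS used/remaining, and the
# state of every DataNode.
hdfs dfsadmin -report
```

If DFS Remaining is near zero, freeing or adding space (and restarting the affected DataNodes) typically clears the error without reinitializing anything.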

3. Wrong FS: stale HDFS paths in the Hive metastore.

FAILED: SemanticException Unable to determine if hdfs://mycluster:9000/user/hive/warehouse/flwordcount is encrypted: java.lang.IllegalArgumentException: Wrong FS: hdfs://mycluster:9000/user/hive/warehouse/flwordcount, expected: hdfs://mycluster

On inspection, the HDFS paths stored in the metadata in MySQL were still the old ones, which is what triggers this error.


Fix: find the database in MySQL that holds the Hive metastore, locate the SDS table, which has a LOCATION column, and update the affected rows to the new location, for example:

change hdfs://master:9000/user/hive/warehouse/flwordcount1 to hdfs://mycluster/user/hive/warehouse/flwordcount
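A sketch of that update from the MySQL command line, assuming the metastore database is named hive and the root account has access; both names are assumptions, so substitute your own and back up the metastore first.

```sh
# Back up the metastore before editing it directly ('hive' DB name assumed).
mysqldump -u root -p hive > hive_metastore_backup.sql

# Rewrite the stale prefix in every table/partition location in one pass.
mysql -u root -p hive -e "
  UPDATE SDS
  SET LOCATION = REPLACE(LOCATION, 'hdfs://master:9000', 'hdfs://mycluster')
  WHERE LOCATION LIKE 'hdfs://master:9000%';
"

# Database-level locations are stored separately and may need the same fix.
mysql -u root -p hive -e "
  UPDATE DBS
  SET DB_LOCATION_URI = REPLACE(DB_LOCATION_URI, 'hdfs://master:9000', 'hdfs://mycluster')
  WHERE DB_LOCATION_URI LIKE 'hdfs://master:9000%';
"
```

Using REPLACE with a LIKE filter rewrites only the stale prefix and leaves already-correct rows untouched, which is safer than editing rows one by one.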


