Troubleshooting a Hive error: NoViableAltException(-1@[215:51: ( KW_AS )?])


Error log:
NoViableAltException(-1@[215:51: ( KW_AS )?])
    at org.antlr.runtime.DFA.noViableAlt(DFA.java:158)
    at org.antlr.runtime.DFA.predict(DFA.java:144)
    at org.apache.hadoop.hive.ql.parse.HiveParser_FromClauseParser.subQuerySource(HiveParser_FromClauseParser.java:5319)
    at org.apache.hadoop.hive.ql.parse.HiveParser_FromClauseParser.fromSource(HiveParser_FromClauseParser.java:3741)
    at org.apache.hadoop.hive.ql.parse.HiveParser_FromClauseParser.joinSource(HiveParser_FromClauseParser.java:1873)
    at org.apache.hadoop.hive.ql.parse.HiveParser_FromClauseParser.fromClause(HiveParser_FromClauseParser.java:1518)
    at org.apache.hadoop.hive.ql.parse.HiveParser.fromClause(HiveParser.java:45861)
    at org.apache.hadoop.hive.ql.parse.HiveParser.selectStatement(HiveParser.java:41516)
    at org.apache.hadoop.hive.ql.parse.HiveParser.regularBody(HiveParser.java:41402)
    at org.apache.hadoop.hive.ql.parse.HiveParser.queryStatementExpressionBody(HiveParser.java:40413)
    at org.apache.hadoop.hive.ql.parse.HiveParser.queryStatementExpression(HiveParser.java:40283)
    at org.apache.hadoop.hive.ql.parse.HiveParser.execStatement(HiveParser.java:1590)
    at org.apache.hadoop.hive.ql.parse.HiveParser.statement(HiveParser.java:1109)
    at org.apache.hadoop.hive.ql.parse.ParseDriver.parse(ParseDriver.java:202)
    at org.apache.hadoop.hive.ql.parse.ParseDriver.parse(ParseDriver.java:166)
    at org.apache.spark.sql.hive.HiveQl$.getAst(HiveQl.scala:276)
    at org.apache.spark.sql.hive.HiveQl$.createPlan(HiveQl.scala:303)
    at org.apache.spark.sql.hive.ExtendedHiveQlParser$$anonfun$hiveQl$1.apply(ExtendedHiveQlParser.scala:41)
    at org.apache.spark.sql.hive.ExtendedHiveQlParser$$anonfun$hiveQl$1.apply(ExtendedHiveQlParser.scala:40)
    at scala.util.parsing.combinator.Parsers$Success.map(Parsers.scala:136)
    at scala.util.parsing.combinator.Parsers$Success.map(Parsers.scala:135)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$map$1.apply(Parsers.scala:242)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$map$1.apply(Parsers.scala:242)
    at scala.util.parsing.combinator.Parsers$$anon$3.apply(Parsers.scala:222)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1$$anonfun$apply$2.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1$$anonfun$apply$2.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$Failure.append(Parsers.scala:202)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$$anon$3.apply(Parsers.scala:222)
    at scala.util.parsing.combinator.Parsers$$anon$2$$anonfun$apply$14.apply(Parsers.scala:891)
    at scala.util.parsing.combinator.Parsers$$anon$2$$anonfun$apply$14.apply(Parsers.scala:891)
    at scala.util.DynamicVariable.withValue(DynamicVariable.scala:57)
    at scala.util.parsing.combinator.Parsers$$anon$2.apply(Parsers.scala:890)
    at scala.util.parsing.combinator.PackratParsers$$anon$1.apply(PackratParsers.scala:110)
    at org.apache.spark.sql.catalyst.AbstractSparkSQLParser.parse(AbstractSparkSQLParser.scala:34)
    at org.apache.spark.sql.hive.HiveQl$.parseSql(HiveQl.scala:295)
    at org.apache.spark.sql.hive.HiveQLDialect$$anonfun$parse$1.apply(HiveContext.scala:66)
    at org.apache.spark.sql.hive.HiveQLDialect$$anonfun$parse$1.apply(HiveContext.scala:66)
    at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$withHiveState$1.apply(ClientWrapper.scala:290)
    at org.apache.spark.sql.hive.client.ClientWrapper.liftedTree1$1(ClientWrapper.scala:237)
    at org.apache.spark.sql.hive.client.ClientWrapper.retryLocked(ClientWrapper.scala:236)
    at org.apache.spark.sql.hive.client.ClientWrapper.withHiveState(ClientWrapper.scala:279)
    at org.apache.spark.sql.hive.HiveQLDialect.parse(HiveContext.scala:65)
    at org.apache.spark.sql.SQLContext$$anonfun$2.apply(SQLContext.scala:211)
    at org.apache.spark.sql.SQLContext$$anonfun$2.apply(SQLContext.scala:211)
    at org.apache.spark.sql.execution.SparkSQLParser$$anonfun$org$apache$spark$sql$execution$SparkSQLParser$$others$1.apply(SparkSQLParser.scala:114)
    at org.apache.spark.sql.execution.SparkSQLParser$$anonfun$org$apache$spark$sql$execution$SparkSQLParser$$others$1.apply(SparkSQLParser.scala:113)
    at scala.util.parsing.combinator.Parsers$Success.map(Parsers.scala:136)
    at scala.util.parsing.combinator.Parsers$Success.map(Parsers.scala:135)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$map$1.apply(Parsers.scala:242)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$map$1.apply(Parsers.scala:242)
    at scala.util.parsing.combinator.Parsers$$anon$3.apply(Parsers.scala:222)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1$$anonfun$apply$2.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1$$anonfun$apply$2.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$Failure.append(Parsers.scala:202)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1.apply(Parsers.scala:254)
    at scala.util.parsing.combinator.Parsers$$anon$3.apply(Parsers.scala:222)
    at scala.util.parsing.combinator.Parsers$$anon$2$$anonfun$apply$14.apply(Parsers.scala:891)
    at scala.util.parsing.combinator.Parsers$$anon$2$$anonfun$apply$14.apply(Parsers.scala:891)
    at scala.util.DynamicVariable.withValue(DynamicVariable.scala:57)
    at scala.util.parsing.combinator.Parsers$$anon$2.apply(Parsers.scala:890)
    at scala.util.parsing.combinator.PackratParsers$$anon$1.apply(PackratParsers.scala:110)
    at org.apache.spark.sql.catalyst.AbstractSparkSQLParser.parse(AbstractSparkSQLParser.scala:34)
    at org.apache.spark.sql.SQLContext$$anonfun$1.apply(SQLContext.scala:208)
    at org.apache.spark.sql.SQLContext$$anonfun$1.apply(SQLContext.scala:208)
    at org.apache.spark.sql.execution.datasources.DDLParser.parse(DDLParser.scala:43)
    at org.apache.spark.sql.SQLContext.parseSql(SQLContext.scala:231)
    at org.apache.spark.sql.hive.HiveContext.parseSql(HiveContext.scala:331)
    at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:817)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:63)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:311)
    at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:226)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

Error in query: cannot recognize input near '<EOF>' '<EOF>' '<EOF>' in subquery source; line 17 pos 94

In plain terms, the subquery input could not be recognized. Below are the things I tried:
1. Backticks `` (one suggestion floating around online is that adding backticks fixes it)

SELECT SUM(`day1`) AS `day_num`
,SUM(`month1`) AS `month_num`
,SUM(`year1`) AS `year_num`
,COLLECT_SET(`date_max`) AS `date_max`
FROM 
(SELECT
IF(total_3.`date_max` == total_3.date1,1,0) AS `day1`
,IF(total_3.`date1` >= CONCAT(SUBSTR(total_3.`date_max`,1,7),"-01"),1,0) AS `month1`
,IF(total_3.`date1` >= CONCAT(SUBSTR(total_3.`date_max`,1,4),"-01-01"),1,0) AS `year1`
,total_3.`date_max`
FROM
(SELECT `date1`,`date_max` from ( 
SELECT `date1`,"A" AS flag
FROM `original_service_code.o_bd_workdata_s`) AS total_1
JOIN (SELECT MAX(`date1`) AS date_max -- get the max date
,"A" AS flag
FROM original_service_code.o_bd_workdata_s) AS total ON total_1.flag = total.flag) AS total_3);

However, it made no difference.
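
In hindsight that is logical: backticks only quote identifiers, they do not change the structure the parser sees, so they cannot repair a grammar-level error. What backticks are actually for is names that collide with reserved words; a quick illustration, with t as a hypothetical table:

SELECT `date`, `order`   -- reserved words usable as column names only when backquoted
FROM t;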

2. Split the SQL statement apart and materialize each subquery as its own table

Step 1
drop table if exists apply_service_code.B;
CREATE TABLE apply_service_code.B AS 
SELECT MAX(date1) AS date_max -- get the max date
,"A" AS flag
FROM original_service_code.o_bd_workdata_s;
Step 2
create table apply_service_code.c as 
SELECT date1,"A" AS flag
FROM original_service_code.o_bd_workdata_s;

Step 3
SELECT date1,"A" AS flag from (SELECT MAX(date1) AS date_max --取出最大日期
,"A" AS flag
FROM original_service_code.o_bd_workdata_s) AS total_1
JOIN (SELECT date1,"A" AS flag
FROM original_service_code.o_bd_workdata_s) AS total ON total_1.flag = total.flag ;
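
As an aside, the only purpose of joining on the constant flag "A" is to attach MAX(date1) to every row. On Hive versions whose windowing accepts an empty OVER () clause, the same result can be had without the self-join; a sketch against the same table (not what I ran at the time):

SELECT date1
      ,MAX(date1) OVER () AS date_max  -- whole-table max, repeated on every row
FROM original_service_code.o_bd_workdata_s;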

Step 4
create table apply_service_code.d as SELECT
IF(total_3.date_max == total_3.date1,1,0) AS day1
,IF(total_3.date1 >= CONCAT(SUBSTR(total_3.date_max,1,7),"-01"),1,0) AS month1
,IF(total_3.date1 >= CONCAT(SUBSTR(total_3.date_max,1,4),"-01-01"),1,0) AS year1
,total_3.date_max
FROM
(SELECT date1,date_max from ( 
SELECT date1,"A" AS flag
FROM original_service_code.o_bd_workdata_s) AS total_1
JOIN (SELECT MAX(date1) AS date_max -- get the max date
,"A" AS flag
FROM original_service_code.o_bd_workdata_s) AS total ON total_1.flag = total.flag) AS total_3;

CREATE TABLE apply_service_code.a_bd_worknumber_s AS
SELECT SUM(day1) AS day_num
,SUM(month1) AS month_num
,SUM(year1) AS year_num
,COLLECT_SET(date_max)[0] AS date_max
FROM apply_service_code.d ;
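
A note on COLLECT_SET(date_max)[0]: COLLECT_SET aggregates values into a deduplicated array, and [0] indexes out a single scalar. That is only safe because date_max is identical on every row of apply_service_code.d, so the array has exactly one element. Roughly (values hypothetical):

SELECT COLLECT_SET(date_max)    AS date_max_arr  -- e.g. ["2018-06-30"]: one element, since all rows agree
      ,COLLECT_SET(date_max)[0] AS date_max      -- e.g. "2018-06-30"
FROM apply_service_code.d;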

Tested in practice, this approach does resolve this class of problem (at any rate, it fixed every NoViableAltException I ran into).

3. Switch Hive versions (another claim online is that the version is too old)
I was on Hive 0.13.0; after moving to 1.2.2, it made no difference.

These are the only workarounds so far. None of the advice found online turned out to be reliable; the second method I worked out myself. If I come across a better solution later, I will update this post.

Big data: from getting started to giving up. You deserve it.


4. Update: I later got it working. All it takes is qualifying the field names with their subquery aliases; the revised code is as follows:
DROP TABLE IF EXISTS apply_service_code.a_bd_worknumber_s;
-- create the table and populate it
CREATE TABLE apply_service_code.a_bd_worknumber_s AS
SELECT SUM(D.day1) AS day_num
,SUM(D.month1) AS month_num
,SUM(D.year1) AS year_num
,COLLECT_SET(D.date_max) AS date_max
FROM 
(SELECT
IF(C.date_max == C.date1,1,0) AS day1
,IF(C.date1 >= CONCAT(SUBSTR(C.date_max,1,7),"-01"),1,0) AS month1
,IF(C.date1 >= CONCAT(SUBSTR(C.date_max,1,4),"-01-01"),1,0) AS year1
,C.date_max
FROM
(SELECT date1,date_max from ( 
SELECT date1,"A" AS flag
FROM original_service_code.o_bd_workdata_s ) AS A
JOIN (SELECT MAX(date1) AS date_max -- get the max date
,"A" AS flag
FROM original_service_code.o_bd_workdata_s) AS B ON A.flag = B.flag) AS C) AS D;
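
Comparing this with attempt 1, the decisive difference is visible on the last line: the outermost derived table now carries an alias (AS D), which attempt 1 left off. Hive's grammar requires every subquery in a FROM clause to have an alias, and the ( KW_AS )? in the error message is the parser sitting at exactly that spot, waiting for an alias that never arrived (-1 is the end-of-input marker). A minimal pair, with t as a hypothetical table:

SELECT cnt FROM (SELECT COUNT(*) AS cnt FROM t);        -- fails: the derived table has no alias
SELECT cnt FROM (SELECT COUNT(*) AS cnt FROM t) AS sub; -- parses: the alias satisfies subQuerySource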

I now have a deep understanding of what this error means. Very good.

For now I won't give up just yet, hahaha.
