/**
 * Parse a string to a [[LogicalPlan]].
 */
override def parsePlan(sqlText: String): LogicalPlan = {
  val logicalPlan = parser.parsePlan(sqlText)
  // transform is used here only to visit every Project node and validate it;
  // the plan itself is returned unchanged.
  logicalPlan transform {
    case project @ Project(projectList, _) =>
      projectList.foreach { name =>
        if (name.isInstanceOf[UnresolvedStar]) {
          throw new RuntimeException("You must specify your project column set," +
            " * is not allowed.")
        }
      }
      project
  }
  logicalPlan
}
/**
 * Parse a string to an [[Expression]].
 */
override def parseExpression(sqlText: String): Expression = parser.parseExpression(sqlText)
/**
 * Parse a string to a [[TableIdentifier]].
 */
override def parseTableIdentifier(sqlText: String): TableIdentifier =
parser.parseTableIdentifier(sqlText)
/**
 * Parse a string to a [[FunctionIdentifier]].
 */
override def parseFunctionIdentifier(sqlText: String): FunctionIdentifier =
parser.parseFunctionIdentifier(sqlText)
/**
 * Parse a string to a [[StructType]]. The passed SQL string should be a comma separated
 * list of field definitions which will preserve the correct Hive metadata.
 */
override def parseTableSchema(sqlText: String): StructType =
parser.parseTableSchema(sqlText)
/**
 * Parse a string to a [[DataType]].
 */
override def parseDataType(sqlText: String): DataType = parser.parseDataType(sqlText)
}
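For context, the methods above live inside a delegating parser class. Here is a minimal sketch of the enclosing declaration and imports the excerpt assumes, under the assumption that the class simply wraps the session's built-in parser (this matches the `new MyParser(parser)` call in the test program below):

import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.UnresolvedStar
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.parser.ParserInterface
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
import org.apache.spark.sql.types.{DataType, StructType}

// Wraps the session's built-in parser, delegating every method to it and
// adding the star-projection check in parsePlan.
class MyParser(parser: ParserInterface) extends ParserInterface {
  // ... the overrides shown above go here ...
}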
Next, let's test it:
package wang.datahub.parser

import org.apache.spark.sql.{SparkSession, SparkSessionExtensions}
import org.apache.spark.sql.catalyst.parser.ParserInterface

object MyParserApp {
  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "E:\\devlop\\envs\\hadoop-common-2.2.0-bin-master")

    type ParserBuilder = (SparkSession, ParserInterface) => ParserInterface
    type ExtensionsBuilder = SparkSessionExtensions => Unit

    // Build our parser on top of the session's default parser, then inject it.
    val parserBuilder: ParserBuilder = (_, parser) => new MyParser(parser)
    val extBuilder: ExtensionsBuilder = { e => e.injectParser(parserBuilder) }

    val spark = SparkSession
      .builder()
      .appName("Spark SQL basic example")
      .config("spark.master", "local[*]")
      .withExtensions(extBuilder)
      .getOrCreate()
    spark.sparkContext.setLogLevel("ERROR")

    import spark.implicits._
    val df = Seq(
      ("First Value", 1, java.sql.Date.valueOf("2010-01-01")),
      ("First Value", 4, java.sql.Date.valueOf("2010-01-01")),
      ("Second Value", 2, java.sql.Date.valueOf("2010-02-01")),
      ("Second Value", 9, java.sql.Date.valueOf("2010-02-01"))
    ).toDF("name", "score", "date_column")
    df.createTempView("p")

    // val df = spark.read.json("examples/src/main/resources/people.json")
    // df.toDF().write.saveAsTable("person")

    // the custom parser rejects the star projection
    spark.sql("select * from p").show()
  }
}
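As an aside, `.withExtensions` wires the extension programmatically; the same builder can also be registered by class name through the `spark.sql.extensions` configuration. A minimal sketch, where the class name `MyExtensions` is a hypothetical example:

// Hypothetical extension entry point: Spark instantiates the class named in
// spark.sql.extensions and applies it to the session's extensions.
class MyExtensions extends (SparkSessionExtensions => Unit) {
  override def apply(e: SparkSessionExtensions): Unit =
    e.injectParser((_, parser) => new MyParser(parser))
}
// e.g. spark-submit --conf spark.sql.extensions=wang.datahub.parser.MyExtensions ...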
Running the app produces the result we expect.
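Because the executed query is `select * from p`, the expected result here is precisely the RuntimeException thrown in parsePlan. A star-free projection passes the check; a minimal sketch using the column names defined above:

// With MyParser active, listing columns explicitly passes the check:
spark.sql("select name, score, date_column from p").show()
// whereas "select * from p" aborts with:
// java.lang.RuntimeException: You must specify your project column set, * is not allowed.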
Extending the Optimizer
=====
Next, let's extend the optimizer. Spark ships with many rule-based optimizations (RBO) out of the box, and the extension API makes it easy to build rules of our own. The rule in this example is deliberately odd, and not even semantics-preserving; it exists purely for illustration.
For expressions of the form `field + 0`, the rules are:

- If the 0 appears on the left of the `+`, replace the expression with the right operand, i.e. `0 + nr` is rewritten to `nr`.
- If the 0 appears on the right of the `+`, rewrite the 0 to 3, i.e. `nr + 0` becomes `nr + 3`.
- If no 0 appears, the expression is left unchanged.

Here is the code:
package wang.datahub.optimizer

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{Add, Expression, Literal}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule

object MyOptimizer extends Rule[LogicalPlan] {

  def apply(logicalPlan: LogicalPlan): LogicalPlan = {
    logicalPlan.transformAllExpressions {
      case Add(left, right) => {
        println("this is my add optimizer")
        if (isStaticAdd(left)) {
          // 0 + nr  =>  nr
          right
        } else if (isStaticAdd(right)) {
          // nr + 0  =>  nr + 3 (deliberately non-equivalent, for demonstration)
          Add(left, Literal(3L))
        } else {
          Add(left, right)
        }
      }
    }
  }

  // True when the expression is a literal 0. By the time this rule runs,
  // constant folding has already collapsed CAST(0 AS BIGINT) into a literal.
  private def isStaticAdd(expression: Expression): Boolean = {
    expression.isInstanceOf[Literal] && expression.asInstanceOf[Literal].toString == "0"
  }

  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "E:\\devlop\\envs\\hadoop-common-2.2.0-bin-master")
    val testSparkSession: SparkSession = SparkSession.builder().appName("Extra optimization rules")
      .master("local[*]")
      .withExtensions(extensions => {
        extensions.injectOptimizerRule(session => MyOptimizer)
      })
      .getOrCreate()
    testSparkSession.sparkContext.setLogLevel("ERROR")
    import testSparkSession.implicits._
    testSparkSession.experimental.extraOptimizations = Seq()

    Seq(-1, -2, -3).toDF("nr").write.mode("overwrite").json("./test_nrs")
    // val optimizedResult = testSparkSession.read.json("./test_nrs").selectExpr("nr + 0")
    testSparkSession.read.json("./test_nrs").createTempView("p")

    var sql = "select nr+0 from p"
    var t = testSparkSession.sql(sql)
    println(t.queryExecution.optimizedPlan)
    println(sql)
    t.show()

    sql = "select 0+nr from p"
    var u = testSparkSession.sql(sql)
    println(u.queryExecution.optimizedPlan)
    println(sql)
    u.show()

    sql = "select nr+8 from p"
    var v = testSparkSession.sql(sql)
    println(v.queryExecution.optimizedPlan)
    println(sql)
    v.show()

    // println(optimizedResult.queryExecution.optimizedPlan.toString())
    // optimizedResult.collect().map(row => row.getAs[Long]("(nr + 0)"))

    Thread.sleep(1000000) // keep the application alive
  }
}
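One detail worth noting: the line `testSparkSession.experimental.extraOptimizations = Seq()` clears Spark's older, extensions-free hook for user rules. Had we not used `injectOptimizerRule`, the same rule could presumably be attached through that hook instead; a minimal sketch:

// Alternative wiring: append the rule to the session's experimental
// optimizations instead of injecting it via SparkSessionExtensions.
testSparkSession.experimental.extraOptimizations = Seq(MyOptimizer)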
The output is as follows; the rule's println fires several times because Catalyst re-runs its optimizer rule batches until the plan stops changing:
this is my add optimizer
this is my add optimizer
this is my add optimizer
Project [(nr#12L + 3) AS (nr + CAST(0 AS BIGINT))#14L]
+- Relation[nr#12L] json
select nr+0 from p
this is my add optimizer
...

The rest of the run follows the same pattern: `select 0+nr from p` is optimized to a plain `nr` projection, while `select nr+8 from p` keeps its `Add` expression untouched, exactly as the rule specifies.