package bi.tag
import java.util.Properties
import bi.utils.{ConfigUtils, KoboldAppUtil}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.slf4j.LoggerFactory
/*
* Created by ** on 2019-08-07
*/
object TestSparkSqlSource {
// Quiet Spark/Hadoop internals: raise the log4j threshold for all "org.*" loggers to WARN.
Logger.getLogger("org").setLevel(Level.WARN)
// SLF4J logger named after this object (via getClass.getSimpleName), used for app-level logging.
val logger = LoggerFactory.getLogger(TestSparkSqlSource.getClass.getSimpleName)
/**
 * Application entry point: builds a SparkSession, runs the Hive demo query,
 * and guarantees the session is released.
 *
 * Fix: in the original, `spark.close()` was only reached on the happy path —
 * an exception thrown inside `testHive` leaked the SparkSession. The call is
 * now wrapped in try/finally so the session is always closed.
 */
def main(args: Array[String]): Unit = {
  // Session name mirrors this object's class name for easy identification in the Spark UI.
  val spark = KoboldAppUtil.createSparkContext(TestSparkSqlSource.getClass.getSimpleName)
  println("---数据处理开始---")
  try {
    testHive(spark)
    println("---数据处理结束---")
  } finally {
    // Always release the SparkSession, even if testHive throws.
    spark.close()
  }
}
def testHive(spark: SparkSession): Unit = {
spark.sql(
"""
|SELECT
| brand,
| d.Name RegionName,
| c.Name BranchName,
| b.WorkNo,
| t1.EmployeeName,
| b.EntryDate,
| e.fillindate lastOfferDate,
| f.count historyOfferCount
|FROM
| (
| SELECT
| employeeno,
| employeename
| FROM
| ******
| WHERE
| FillinDate>='2019-07-15'
| AND FillinDate<='2019-07-21'
| AND PhaseId=10
| AND Role='顾问'
| AND IsApprove=1
| AND IsCancel IS NULL
| GROUP BY
| employeeno,
| employeename )t1
|LEFT JOIN
| *****s b
|ON
Note: this file is an excerpt from a blog article titled "Spark / SparkSQL: reading files, reading Hive tables, writing to Hive, and writing to MySQL" (last recommended update listed as 2024-08-04 00:33:27). The article demonstrates using SparkSQL for file ingestion, Hive table operations, and writing results to Hive and MySQL; it cautions that the MySQL account must have the required privileges and that target tables must be created in advance with matching column types, otherwise the write will fail. (Summary originally auto-generated by CSDN.)