1.需求:
使用Spark SQL 连接hive ,读取数据,将统计结果存储到 mysql中
2.将写好的代码打包上传到集群,然后提交spark运行,前提是hive,HDFS已经启动
3.代码:
(1)pom.xml
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.1.0</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.1.0</version>
</dependency>
(2)demo4.scala
package day1209
import org.apache.spark.sql.SparkSession
import java.util.Properties
/**
* 使用Spark SQL 连接hive ,将统计结果存储到 mysql中
*
* ./spark-submit --master spark://hadoop1:7077 --jars /usr/local/tmp_files/mysql-connector-java-8.0.11.jar --driver-class-path /usr/local/tmp_files/mysql-connector-java-8.0.11.jar --class day1209.Demo4 /usr/local/tmp_files/Demo1209.jar
*/
object Demo4 {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder().appName("Hive2Mysql"