Some of the conceptual content comes from: hive学习(七)------创建动态分区_BigDate_小学生的博客-CSDN博客
Advanced follow-up: spark增量抽取MySQL中的数据存入hive动态分区表(2)_月亮给我抄代码的博客-CSDN博客
Preface: I put the hive-site.xml file in the resources directory; the related configuration and dependencies are given at the end of this post.
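For reference, with the standard Maven layout this means the files sit roughly as shown below (the exact structure is an assumption; the artifact name is taken from the pom.xml at the end, and the Scala file name is inferred from the object name):

spark-practice/
├── pom.xml
└── src/main/
    ├── resources/hive-site.xml
    └── scala/my_project/my_work02.scala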
Do not use Chinese text as the value of a static or dynamic partition field!!!
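If the source data does contain Chinese text in the column you want to partition by (for example a release date such as 2023年07月01日), one option is to normalize it to an ASCII value first. The snippet below is only a hedged sketch: it uses the spark session, the datas view and the releasedate column from the full listing further down, and the date pattern is an assumption about the source format.

    // Hedged sketch: convert a Chinese-formatted date string to an ASCII 'yyyy-MM-dd'
    // value so it is safe to use as a dynamic partition value.
    // The pattern 'yyyy年MM月dd日' is an assumed source format, not from the original post.
    spark.sql(
      """
        |select from_unixtime(unix_timestamp(releasedate, 'yyyy年MM月dd日'), 'yyyy-MM-dd') as release_day
        |from datas
        |""".stripMargin
    ).show(5, false)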
package my_project
import org.apache.spark.sql.SparkSession
object my_work02 {
  def main(args: Array[String]): Unit = {
    // TODO dynamic partition table demo
    val spark: SparkSession = SparkSession
      .builder()
      .appName("my_work01")
      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()
    // Read the MySQL data and register it as a temporary view
    spark.read
      .format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", "jdbc:mysql://localhost:3306/spark_db")
      .option("user", "root")
      .option("password", "456789")
      .option("dbtable", "movies")
      .load()
      .createOrReplaceTempView("datas")
    // TODO: the following two parameters must be set before the table is created
    // Enable dynamic partitioning (default: true)
    spark
      .sql(
        """
          |set hive.exec.dynamic.partition=true
          |""".stripMargin
      )
    // Dynamic partition mode: the default, strict, requires at least one static
    // partition column, so switch it to nonstrict here
    spark
      .sql(
        """
          |set hive.exec.dynamic.partition.mode=nonstrict
          |""".stripMargin
      )
    // Other tuning parameters:
    // set hive.exec.max.dynamic.partitions.pernode=100 (default 100)
    //   Maximum number of dynamic partitions each mapper or reducer may create; exceeding it raises an error.
    // set hive.exec.max.dynamic.partitions=1000 (default)
    //   Maximum total number of dynamic partitions; exceeding it raises an error.
    // set hive.exec.max.created.files=100000 (default)
    //   Maximum number of files that may be created globally; exceeding it raises an error.
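    // (Example, not part of the original flow: the values below are only illustrative.)
    // If an insert overflows these limits, they can be raised the same way as the
    // two settings above, for example:
    //   spark.sql("set hive.exec.max.dynamic.partitions.pernode=400")
    //   spark.sql("set hive.exec.max.dynamic.partitions=2000")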
    // Create the dynamic partition table
    spark.sql(
      """
        |create table mydb.dynamic_movies(
        |  movie_name string,
        |  boxoffice string,
        |  box_rate string,
        |  sessions string,
        |  show_count_rate string,
        |  avg_number string,
        |  attendance string,
        |  total_boxoffice string,
        |  movie_days string,
        |  releasedate string
        |) partitioned by (current_time string)
        |row format delimited fields terminated by '\t'
        |""".stripMargin
    )
    // Sync the full data read from MySQL into the Hive table, using the date column
    // as the (dynamic) partition column. Note the partition clause must name the
    // table's partition column, current_time, as declared in the DDL above.
    spark
      .sql(
        """
          |insert into table mydb.dynamic_movies partition(current_time)
          |select movie_name,boxoffice,box_rate,sessions,show_count_rate,avg_number,
          |attendance,total_boxoffice,movie_days,releasedate,current_time
          |from
          |  datas
          |order by total_boxoffice desc
          |""".stripMargin
      )
    // TODO: a dynamic partition insert automatically maps the trailing field(s) of the
    // select to the partition column(s); in other words, the last field in the select
    // list must be the partition field. This is very important!!!
    // If there are several dynamic partition columns, just list them in the same order
    // as in the partition clause.
    spark.stop()
  }
}
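To confirm that the dynamic partitions were actually created, a quick check (a sketch, to be run before spark.stop(); database and table names as above) is to list the partitions and sample a few rows:

    // Sanity check (sketch): list the partitions Hive created and read back a few rows.
    spark.sql("show partitions mydb.dynamic_movies").show(false)
    spark.sql("select * from mydb.dynamic_movies limit 5").show()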
Dependency configuration (pom.xml)
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>self</groupId>
<artifactId>spark-practice</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<spark.version>2.1.0</spark.version>
<scala.version>2.11</scala.version>
</properties>
<repositories>
<repository>
<id>nexus-aliyun</id>
<name>Nexus aliyun</name>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_${scala.version}</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_${scala.version}</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_${scala.version}</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_${scala.version}</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-mllib_${scala.version}</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.27</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>2.0.0-alpha0</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>2.0.0-alpha0</version>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-nop</artifactId>
<version>1.7.25</version>
<scope>compile</scope>
</dependency>
</dependencies>
</project>
hive-site.xml configuration file
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- jdbc 连接的 URL -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://hadoop106:3306/metastore?useSSL=false</value>
</property>
<!-- jdbc 连接的 Driver-->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- jdbc 连接的 username-->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<!-- jdbc 连接的 password -->
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>000000</value>
</property>
<!-- Hive 元数据存储版本的验证 -->
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<!--元数据存储授权-->
<property>
<name>hive.metastore.event.db.notification.api.auth</name>
<value>false</value>
</property>
<!-- Hive 默认在 HDFS 的工作目录 -->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
</property>
<!-- 指定存储元数据要连接的地址 -->
<property>
<name>hive.metastore.uris</name>
<value>thrift://hadoop106:9083</value>
</property>
<!-- 指定 hiveserver2 连接的 host -->
<property>
<name>hive.server2.thrift.bind.host</name>
<value>hadoop106</value>
</property>
<!-- 指定 hiveserver2 连接的端口号 -->
<property>
<name>hive.server2.thrift.port</name>
<value>10000</value>
</property>
<property>
<name>hive.cli.print.header</name>
<value>true</value>
</property>
<property>
<name>hive.cli.print.current.db</name>
<value>true</value>
</property>
</configuration>