This update fixes the mojibake (garbled characters) in Chinese comments inside the SQL files.
In yarn-client and local modes the configuration file sits on the local filesystem, so the job runs directly with no extra setup.
For yarn-cluster mode the driver may start on any compute node, so to make the configuration file readable we had the ops team mount an HDFS-backed shared volume on the local disks of all YARN NodeManager (compute) nodes (the ResourceManager node is not mounted). Drop the configuration file and the SQL files onto that mount, and cluster mode reads them exactly as the client and local modes do.
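The encoding fix itself lives in doFile() in Code 1 below: the SQL file is decoded explicitly as UTF-8 instead of with the platform default charset. A minimal sketch of the idea (the file path here is just a placeholder):

import scala.io.Source

// Decode the .sql file explicitly as UTF-8 so Chinese comments survive intact
// ("/opt/etl/sqlFiles/example.sql" is an illustrative path, not from the original post)
val sqlText: String = Source.fromFile("/opt/etl/sqlFiles/example.sql", "UTF-8")
  .getLines()
  .mkString("\n")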
=================== pom.xml ===================
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>hx.example</groupId>
<artifactId>sparkDwdFilter</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<maven.compiler.encoding>UTF-8</maven.compiler.encoding>
<encoding>UTF-8</encoding>
<!-- <hadoop.version>2.7.0</hadoop.version>-->
<!-- <hadoop.version>3.2.2</hadoop.version>-->
<hadoop.version>3.0.0-cdh6.3.2</hadoop.version>
<!-- <hive.version>1.2.1</hive.version>-->
<hive.version>2.1.1-cdh6.3.2</hive.version>
<hbase.version>2.1.0-cdh6.3.2</hbase.version>
<scala.version>2.11.12</scala.version>
<spark.version>2.4.0-cdh6.3.2</spark.version>
<!-- <gt.version>21.1</gt.version>-->
<!-- <geomesa.version>2.4.0</geomesa.version>-->
<!-- <geotools.version>24-SNAPSHOT</geotools.version>-->
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<!-- <scope>test</scope>-->
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
<!-- Exclude the netty artifact from hadoop-client -->
<exclusions>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Fixes the io.netty.buffer.PooledByteBufAllocator.defaultNumHeapArena()I exception -->
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
<version>4.1.18.Final</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.scala-lang/scala-library -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-sql -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>${spark.version}</version>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-hive -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- Added to fix: org.apache.hadoop.hive.hbase.hbaseserde not found -->
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-hbase-handler</artifactId>
<version>${hive.version}</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>org.apache.hadoop</groupId>-->
<!-- <artifactId>hadoop-hdfs</artifactId>-->
<!-- <version>${hadoop.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hadoop</groupId>-->
<!-- <artifactId>hadoop-hdfs-client</artifactId>-->
<!-- <version>${hadoop.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hbase</groupId>-->
<!-- <artifactId>hbase-client</artifactId>-->
<!-- <version>${hbase.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hbase</groupId>-->
<!-- <artifactId>hbase-common</artifactId>-->
<!-- <version>${hbase.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hbase</groupId>-->
<!-- <artifactId>hbase-http</artifactId>-->
<!-- <version>${hbase.version}</version>-->
<!-- </dependency>-->
<!--spark-hive begin-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hive</groupId>-->
<!-- <artifactId>hive-serde</artifactId>-->
<!-- <version>${hive.version}</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hive</groupId>-->
<!-- <artifactId>hive-exec</artifactId>-->
<!-- <version>${hive.version}</version>-->
<!-- <exclusions>-->
<!-- <exclusion>-->
<!-- <groupId>org.apache.avro</groupId>-->
<!-- <artifactId>avro</artifactId>-->
<!-- </exclusion>-->
<!-- </exclusions>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.hive.hcatalog</groupId>-->
<!-- <artifactId>hive-hcatalog-core</artifactId>-->
<!-- <version>${hive.version}</version>-->
<!-- <exclusions>-->
<!-- <exclusion>-->
<!-- <groupId>org.apache.avro</groupId>-->
<!-- <artifactId>avro</artifactId>-->
<!-- </exclusion>-->
<!-- </exclusions>-->
<!-- </dependency>-->
<!--spark-hive end-->
<!-- https://mvnrepository.com/artifact/com.google.guava/guava -->
<!-- <dependency>-->
<!-- <groupId>com.google.guava</groupId>-->
<!-- <artifactId>guava</artifactId>-->
<!-- <version>15.0</version>-->
<!-- </dependency>-->
<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
<!-- <dependency>-->
<!-- <groupId>mysql</groupId>-->
<!-- <artifactId>mysql-connector-java</artifactId>-->
<!-- <version>5.1.46</version>-->
<!-- </dependency>-->
<!-- https://mvnrepository.com/artifact/org.apache.hive/hive-cli -->
<!-- <dependency>-->
<!-- <groupId>org.apache.hive</groupId>-->
<!-- <artifactId>hive-cli</artifactId>-->
<!-- <version>${hive.version}</version>-->
<!-- </dependency>-->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.15</version>
<exclusions>
<exclusion>
<groupId>javax.jms</groupId>
<artifactId>jms</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jdmk</groupId>
<artifactId>jmxtools</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jmx</groupId>
<artifactId>jmxri</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<repositories>
<repository>
<id>cloudera</id>
<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<!-- <repository>-->
<!-- <id>alimaven</id>-->
<!-- <url>http://maven.aliyun.com/nexus/content/repositories/central/</url>-->
<!-- </repository>-->
<!-- <repository>-->
<!-- <id>locationtech-releases</id>-->
<!-- <url>https://repo.locationtech.org/content/groups/releases</url>-->
<!-- <snapshots>-->
<!-- <enabled>false</enabled>-->
<!-- </snapshots>-->
<!-- </repository>-->
<!-- <repository>-->
<!-- <id>locationtech-snapshots</id>-->
<!-- <url>https://repo.locationtech.org/content/groups/snapshots</url>-->
<!-- <releases>-->
<!-- <enabled>false</enabled>-->
<!-- </releases>-->
<!-- <snapshots>-->
<!-- <enabled>true</enabled>-->
<!-- </snapshots>-->
<!-- </repository>-->
<!-- <repository>-->
<!-- <id>boundlessgeo</id>-->
<!-- <url>http://repo.boundlessgeo.com/main</url>-->
<!-- </repository>-->
<!-- <repository>-->
<!-- <id>osgeo</id>-->
<!-- <url>http://download.osgeo.org/webdav/geotools</url>-->
<!-- </repository>-->
<!-- <repository>-->
<!-- <id>conjars.org</id>-->
<!-- <url>http://conjars.org/repo</url>-->
<!-- </repository>-->
<!-- <repository>-->
<!-- <id>spring-plugin</id>-->
<!-- <url>https://repo.spring.io/plugins-release/</url>-->
<!-- </repository>-->
</repositories>
<build>
<sourceDirectory>src/main/java</sourceDirectory>
<plugins>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>3.2.2</version>
<executions>
<execution>
<goals>
<goal>compile</goal>
<goal>testCompile</goal>
</goals>
<configuration>
<args>
<arg>-dependencyfile</arg>
<arg>${project.build.directory}/.scala_dependencies</arg>
</args>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.1.0</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
</transformers>
<relocations>
<relocation>
<pattern>org.apache.http</pattern>
<shadedPattern>org.apache.myhttp</shadedPattern>
</relocation>
</relocations>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/maven/**</exclude>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2.1</version>
<executions>
<execution>
<goals>
<goal>exec</goal>
</goals>
</execution>
</executions>
<configuration>
<executable>java</executable>
<includeProjectDependencies>true</includeProjectDependencies>
<includePluginDependencies>false</includePluginDependencies>
<classpathScope>compile</classpathScope>
<mainClass></mainClass>
</configuration>
</plugin>
<!-- Compiler plugin: pin the JDK source/target level -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
</plugins>
</build>
</project>
=================== end of pom.xml =======================
=============== Code 1: Ods2DwdFilterSql ===============
package hx.com
import hx.com.constant.PropConstants
import hx.com.util.PropertieUtil
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.SparkSession
import java.io.File
import java.util.Properties
import scala.io.{BufferedSource, Source}
/**
 * Cleans ODS-layer data and lands it in the DWD layer.
 */
object Ods2DwdFilterSql {

  def main(args: Array[String]): Unit = {
    val filePath: String = args(0)
    // Read the cluster configuration file (distributed via --files, so a relative path works)
    val prop: Properties = PropertieUtil.load("config.properties")
    // For local testing, read the file from the classpath instead
    // val prop: Properties = PropertieUtil.getProperties("/config.properties")
    System.setProperty("java.security.krb5.conf", prop.getProperty(PropConstants.KRB5_CONF_PATH))
    System.setProperty("HADOOP_USER_NAME", prop.getProperty(PropConstants.HADOOP_USER_NAME))
    System.setProperty("user.name", prop.getProperty(PropConstants.USER_NAME))
    // Kerberos login with the configured principal and keytab
    UserGroupInformation.loginUserFromKeytab(
      prop.getProperty(PropConstants.KEYTAB_NAME), prop.getProperty(PropConstants.KEYTAB_FILE_PATH)
    )
    System.out.println(UserGroupInformation.getLoginUser)

    val session: SparkSession = SparkSession.builder() //.master("local[2]")
      .appName("SparkSessionApp")
      .enableHiveSupport() // enable Hive support
      .getOrCreate()
    // session.sparkContext.setLogLevel("WARN")

    val sql: String = doFile(filePath)
    val strings: Array[String] = sql.split(";")
    var i = 0
    strings.foreach(sql => {
      val startTime: Long = System.currentTimeMillis()
      println("============== statement " + (i + 1) + " ===SQL start=================")
      println(sql)
      session.sql(sql).show()
      val stopTime: Long = System.currentTimeMillis()
      val processTime: Long = (stopTime - startTime) / 1000 // elapsed time in seconds
      println("=============== statement " + (i + 1) + " ==SQL done====took " + processTime + " s==========")
      i = i + 1
    })
    session.stop()
  }

  // Read the external SQL file into a single string
  def doFile(fileName: String): String = {
    val file: File = new File(fileName)
    import java.io.FileInputStream
    val stream: FileInputStream = new FileInputStream(file)
    // Decode explicitly as UTF-8 so Chinese comments are not garbled
    val buff: BufferedSource = Source.fromInputStream(stream, "UTF-8")
    // Join all lines back into one SQL string
    val sql: String = buff.getLines().mkString("\n")
    sql
  }
}
=================== end of Code 1 ===============
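One caveat with the statement loop in Code 1: sql.split(";") also yields empty or whitespace-only fragments (for example when the file ends with a semicolon or contains blank statements), which session.sql cannot parse. A small hardening sketch, not part of the original code, that trims each fragment and skips blanks:

// Hypothetical tightening of the statement loop: trim fragments and drop blanks before executing
val statements: Array[String] = sql.split(";").map(_.trim).filter(_.nonEmpty)
statements.zipWithIndex.foreach { case (stmt, idx) =>
  println(s"=== statement ${idx + 1} ===")
  session.sql(stmt).show()
}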
=================== Code 2: PropertieUtil ===============
package hx.com.util;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
public class PropertieUtil {

    /** Load properties from the classpath (absolute resource path, e.g. "/config.properties"); used for local testing. */
    public static Properties getProperties(String path) {
        Properties prop = new Properties();
        InputStream inputStream = Object.class.getResourceAsStream(path);
        try {
            prop.load(inputStream);
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                inputStream.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return prop;
    }

    /** Load properties from a filesystem path; on the cluster, --files puts config.properties into the working directory, so a relative path works. */
    public static Properties load(String path) {
        Properties prop = new Properties();
        try {
            prop.load(new FileInputStream(path));
        } catch (Exception e) {
            e.printStackTrace();
        }
        return prop;
    }
}
=================== end of Code 2 ===============
===== config.properties ==== start =========
namespace=ods_membership_prd
column_family=cf
krb5_conf_path=/etc/krb5.conf
#krb5_conf_path=D:/workspace/canal-kafka2hbase/src/main/resources/krb5.ini
keytab_file_path=/opt/etl/config/etl_admin.keytab
#keytab_file_path=hdfs://HDFS0525/user/etl_admin/etl_admin.keytab
#keytab_file_path=D:/soft/kerbros/etl_admin.keytab
hadoop_user_name=etl_admin
user_name=etl_admin
keytab_name=etl_admin@xxx.com
===== config.properties ==== end ========
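The PropConstants class imported in Code 1 (hx.com.constant.PropConstants) is not reproduced in this post; presumably each constant simply names one of the keys above. A hypothetical Scala sketch of that mapping, assuming the constant names line up with the keys in config.properties:

// Hypothetical reconstruction of hx.com.constant.PropConstants (not shown in the original post);
// each constant is assumed to name a key in config.properties
object PropConstants {
  val KRB5_CONF_PATH   = "krb5_conf_path"
  val KEYTAB_FILE_PATH = "keytab_file_path"
  val KEYTAB_NAME      = "keytab_name"
  val HADOOP_USER_NAME = "hadoop_user_name"
  val USER_NAME        = "user_name"
}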
==== local mode launch script (run on a cluster node) ====
#!/bin/bash
if [ $# -eq 1 ]; then
  spark-submit --master local[4] --class hx.com.Ods2DwdFilterSql --files /home/etl_admin/spark/config.properties sparkDwdFilter-1.0-SNAPSHOT.jar "$1"
else
  echo "Usage: $0 filename.sql (or .hql)"
fi
===== yarn-client mode launch script =====
#!/bin/bash
if [ $# -eq 1 ]; then
  spark-submit \
    --master yarn \
    --deploy-mode client \
    --queue default \
    --driver-memory 2g \
    --num-executors 3 \
    --executor-memory 2g \
    --executor-cores 2 \
    --class hx.com.Ods2DwdFilterSql \
    --files /home/etl_admin/spark/config.properties \
    sparkDwdFilter-1.0-SNAPSHOT.jar "/opt/etl/sqlFiles/$1"
else
  echo "Usage: $0 filename.sql (or .hql)"
fi
======= yarn-cluster mode launch script =======
#!/bin/bash
# /opt/etl/sqlFiles is expected to sit on the shared mount visible to every NodeManager (see the note at the top)
if [ $# -eq 1 ]; then
  spark-submit \
    --master yarn \
    --deploy-mode cluster \
    --queue default \
    --driver-memory 2g \
    --num-executors 3 \
    --executor-memory 2g \
    --executor-cores 2 \
    --class hx.com.Ods2DwdFilterSql \
    --files /home/etl_admin/spark/config.properties \
    sparkDwdFilter-1.0-SNAPSHOT.jar "/opt/etl/sqlFiles/$1"
else
  echo "Usage: $0 filename.sql (or .hql)"
fi