Big Data: Connecting Spark to Databases

Import the log4j-related dependencies

      <!-- log -->
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-api</artifactId>
        <version>1.7.30</version>
      </dependency>
      <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-log4j12</artifactId>
        <version>1.7.30</version>
      </dependency>
      <dependency>
        <groupId>log4j</groupId>
        <artifactId>log4j</artifactId>
        <version>1.2.17</version>
      </dependency>

Import the MySQL and Hive dependencies

      <!-- supporting libraries for spark sql -->
      <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>3.9</version>
      </dependency>
      <dependency>
        <groupId>commons-io</groupId>
        <artifactId>commons-io</artifactId>
        <version>2.4</version>
      </dependency>
      <dependency>
        <groupId>org.jeecgframework.nacos</groupId>
        <artifactId>nacos-client</artifactId>
        <version>1.4.1</version>
      </dependency>

      <!-- Hive dependency -->
      <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-hive_2.12</artifactId>
        <version>3.0.1</version>
      </dependency>

      <!-- MySQL dependency -->
      <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.48</version>
      </dependency>
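
Before wiring these into Spark, it helps to confirm that the MySQL driver and credentials actually work. A minimal plain-JDBC smoke test, assuming the same host, database (test), and credentials that the Spark code later in this article uses:

import java.sql.DriverManager

object JdbcSmokeTest {
  def main(args: Array[String]): Unit = {
    // mysql-connector-java 5.x registers com.mysql.jdbc.Driver
    // (8.x renames it to com.mysql.cj.jdbc.Driver)
    Class.forName("com.mysql.jdbc.Driver")
    val conn = DriverManager.getConnection(
      "jdbc:mysql://192.168.131.200:3306/test?useSSL=false", "root", "12345678")
    try {
      val rs = conn.createStatement().executeQuery("select 1")
      while (rs.next()) println(rs.getInt(1))
    } finally conn.close()
  }
}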

Import the configuration files

Copy the core-site.xml and hdfs-site.xml files from hadoop313/etc/hadoop into the project's resources folder. First, core-site.xml:

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<property>
    <!-- NameNode address -->
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.131.200:9820</value>
</property>
<property>
    <!-- Data storage directory -->
    <name>hadoop.tmp.dir</name>
    <value>/opt/software/hadoop313/data</value>
</property>
<property>
    <!-- Static user for HDFS web UI access: root -->
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
</property>
<property>
    <!-- Hosts from which root (the superuser) may act as a proxy -->
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
</property>
<property>
    <!-- Groups whose members root may impersonate -->
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
</property>
<property>
    <!-- Users root may impersonate -->
    <name>hadoop.proxyuser.root.users</name>
    <value>*</value>
</property>
</configuration>

Then hdfs-site.xml:

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <!-- NameNode web UI address -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>192.168.131.200:9870</value>
    </property>
    <!-- Secondary NameNode web UI address -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>192.168.131.200:9868</value>
    </property>
    <!-- HDFS replication factor -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
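
With core-site.xml and hdfs-site.xml on the classpath, Hadoop's client resolves bare paths against the fs.defaultFS address above, so Spark can read HDFS files without spelling out the NameNode URL. A minimal sketch, assuming a hypothetical file /data/sample.txt already exists on HDFS:

import org.apache.spark.sql.SparkSession

object HdfsReadCheck {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("hdfs_read_check")
      .getOrCreate()
    // "/data/sample.txt" is a hypothetical path; with fs.defaultFS configured,
    // it resolves to hdfs://192.168.131.200:9820/data/sample.txt
    spark.read.textFile("/data/sample.txt").show(10, truncate = false)
    spark.close()
  }
}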

Copy the hive-site.xml file from hive312/conf into the project's resources folder:

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://192.168.131.200:3306/hive312?createDatabaseIfNotExist=true&amp;useSSL=false</value>
    <description>connect to mysql for hive metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>driver for mysql</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>username to mysql</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>12345678</value>
    <description>password to mysql</description>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>NONE</value>
  </property>
  <!-- Let the root user through -->
  <property>
    <name>hive.server2.thrift.client.user</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.server2.thrift.client.password</name>
    <value>root</value>
  </property>
</configuration>
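
Because hive.server2.authentication is NONE, a JDBC client can reach HiveServer2 with just the user and password above. A minimal sketch, assuming HiveServer2 runs on its default port 10000 and that org.apache.hive:hive-jdbc is on the classpath (it is not among the dependencies listed earlier):

import java.sql.DriverManager

object HiveServer2Check {
  def main(args: Array[String]): Unit = {
    // requires the hive-jdbc dependency (an assumption; not in the pom above)
    Class.forName("org.apache.hive.jdbc.HiveDriver")
    val conn = DriverManager.getConnection(
      "jdbc:hive2://192.168.131.200:10000/test", "root", "root")
    try {
      val rs = conn.createStatement().executeQuery("show tables")
      while (rs.next()) println(rs.getString(1))
    } finally conn.close()
  }
}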

Copy the log4j.properties file into the project's resources folder:

log4j.rootLogger=ERROR,C,R

log4j.appender.C=org.apache.log4j.ConsoleAppender
log4j.appender.C.layout=org.apache.log4j.PatternLayout
log4j.appender.C.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [%c]-[%p] %m%n

log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.Encoding=UTF-8
log4j.appender.R.File=user_act_log/prolog.log
log4j.appender.R.MaxFileSize=128MB
log4j.appender.R.MaxBackupIndex=40
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [%c]-[%p] %m%n
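
With rootLogger set to ERROR, Spark's own INFO/WARN chatter is suppressed while application errors still reach both appenders. A minimal usage sketch (the logger and messages are illustrative):

import org.apache.log4j.Logger

object LogDemo {
  def main(args: Array[String]): Unit = {
    val log = Logger.getLogger(getClass)
    // reaches both the console appender C and the rolling file appender R
    log.error("an illustrative error message")
    // suppressed: INFO is below the configured ERROR threshold
    log.info("this message does not appear")
  }
}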

Code to connect to Hive

import org.apache.spark.sql.SparkSession

object SparkHive02 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkhive02")
      // reads hive-site.xml from the classpath and connects to the Hive metastore
      .enableHiveSupport()
      .getOrCreate()
    spark.sql("select * from test.student")
      .show()
    spark.close()
  }
}
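
Reading is only half the job; the same session can also write results back to Hive. A minimal sketch, assuming a hypothetical target table test.student_copy (saveAsTable creates it if it does not exist):

import org.apache.spark.sql.{SaveMode, SparkSession}

object SparkHiveWrite {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkhive_write")
      .enableHiveSupport()
      .getOrCreate()
    // "test.student_copy" is a hypothetical name; Overwrite replaces it on re-runs
    spark.sql("select * from test.student")
      .write
      .mode(SaveMode.Overwrite)
      .saveAsTable("test.student_copy")
    spark.close()
  }
}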

Code to connect to MySQL

import java.util.Properties

import org.apache.spark.sql.SparkSession

object Test01Sql {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession =
      SparkSession.builder()
        .master("local[*]")
        .appName("spark_sql_01")
        .getOrCreate()

    val URL = "jdbc:mysql://192.168.131.200/test?useSSL=false"
    val TABLE = "user_info"
    val PRO = new Properties()
    PRO.setProperty("driver", "com.mysql.jdbc.Driver")
    PRO.setProperty("user", "root")
    PRO.setProperty("password", "12345678")

    // register the JDBC table as a temporary view so it can be queried with SQL
    spark.read.jdbc(URL, TABLE, PRO).createTempView("user_info")
    spark.sql(
      """
        |select * from user_info
        |""".stripMargin)
      //.printSchema()
      .show()
    spark.close()
  }
}
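
The reverse direction goes through DataFrameWriter.jdbc. A minimal sketch that copies the table's rows into a hypothetical table user_info_copy, reusing the URL and Properties from above:

import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

object Test02SqlWrite {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("spark_sql_02")
      .getOrCreate()

    val URL = "jdbc:mysql://192.168.131.200/test?useSSL=false"
    val PRO = new Properties()
    PRO.setProperty("driver", "com.mysql.jdbc.Driver")
    PRO.setProperty("user", "root")
    PRO.setProperty("password", "12345678")

    // "user_info_copy" is a hypothetical target table; Overwrite drops and
    // recreates it from the DataFrame's schema on each run
    spark.read.jdbc(URL, "user_info", PRO)
      .write
      .mode(SaveMode.Overwrite)
      .jdbc(URL, "user_info_copy", PRO)
    spark.close()
  }
}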
