pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.fengrui</groupId>
<artifactId>ScalaCount</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<scala.version>2.11.12</scala.version>
<spark.version>2.3.3</spark.version>
<hadoop.version>2.8.1</hadoop.version>
<encoding>UTF-8</encoding>
</properties>
<dependencies>
<!-- Scala library dependency -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<!-- Spark core dependency -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- Spark SQL dependency -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!-- Pin the hadoop-client API version -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<!-- MySQL JDBC connector dependency -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<scope>runtime</scope>
<version>8.0.16</version>
</dependency>
</dependencies>
<build>
<pluginManagement>
<plugins>
<!-- Plugin for compiling Scala sources -->
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>3.2.2</version>
</plugin>
<!-- Plugin for compiling Java sources -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.5.1</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<executions>
<execution>
<id>scala-compile-first</id>
<phase>process-resources</phase>
<goals>
<goal>add-source</goal>
<goal>compile</goal>
</goals>
</execution>
<execution>
<id>scala-test-compile</id>
<phase>process-test-resources</phase>
<goals>
<goal>testCompile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
</plugin>
<!-- Plugin for building the shaded jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.4.3</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
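With this pom in place, mvn clean package compiles the Scala sources and produces a shaded jar under target/. A minimal sketch of building and running one of the examples below in local mode (the jar name follows from the artifactId and version above; swap in whichever main class you want to run):

mvn clean package
spark-submit --class spark.sql.SparkSql --master local target/ScalaCount-1.0-SNAPSHOT.jar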
Method 1: infer the schema by reflection, using a case class and toDF
package spark.sql
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
object SparkSql {
case class Person(id: String, name: String, age: Int)
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
conf.setAppName("sparkSql")
conf.setMaster("local")
val sc = new SparkContext(conf)
//Create the SQLContext
val sqlc = new SQLContext(sc)
val lines: RDD[Array[String]] = sc.textFile("G:\\person.txt").map(_.split(" "))
//Map each split line into the Person case class
val person: RDD[Person] = lines.map(x => Person(x(0),x(1),x(2).toInt))
//toDF works by reflection; to use it, sqlc.implicits._ must be imported here, inside the scope where sqlc exists (the import cannot simply go at the top of the file)
import sqlc.implicits._
/**
* DataFrame [_1: Int, _2: String, _3: Int]
* In spark-shell, when the data is parallelized directly (no case class) and toDF() is
* called with no arguments, the default column names _1, _2, _3, ... are used
* (numbering starts at 1).
* Column names can also be passed to toDF(), but they must match the data one to one:
* supplying more names than there are columns (or fewer) raises an error.
*
* In this program the data is stored in a case class, so the case-class constructor
* parameters become the column names and toDF() picks them up directly;
* custom column names can still be supplied instead.
*/
val personDF: DataFrame = person.toDF()
// val personDF: DataFrame = person.toDF("ID","Name","Age")
personDF.show()
//To use SQL syntax, register a temporary table; the table is effectively stored inside the SQLContext instance that created it
personDF.registerTempTable("t_person")
//Keep rows with age > 20, sorted by id in descending order
val sql = "select * from t_person where age > 20 order by id desc"
//Run the query
val res: DataFrame = sqlc.sql(sql)
//show() prints 20 rows by default
res.show()
//Output path (save() writes in the default Parquet format, despite the .txt extension)
res.write.mode("append").save("G:/PersonOut.txt")
}
}
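To make the column-naming notes in the comment above concrete, here is a small sketch that reuses the sc and sqlc created in the code above; the sample tuples are made up for illustration. Calling toDF() on an RDD of tuples (no case class) yields the default names _1, _2, _3, while passing exactly one name per column overrides them:

// Sketch only: assumes the sc and sqlc from SparkSql above are in scope
import sqlc.implicits._
val tuples: RDD[(String, String, Int)] = sc.parallelize(Seq(("1", "zhangsan", 25), ("2", "lisi", 30)))
val defaultNames: DataFrame = tuples.toDF()                   // columns: _1, _2, _3
defaultNames.printSchema()
val customNames: DataFrame = tuples.toDF("id", "name", "age") // exactly one name per column
customNames.printSchema()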
Method 2: specify the schema explicitly with StructType
package spark.sql
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}
object SparkSql2 {
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
conf.setAppName("SparkSql2")
conf.setMaster("local")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
//Read the data and split each line
val lines: RDD[Array[String]] = sc.textFile("G:/person.txt").map(_.split(" "))
//Create a StructType that encapsulates the data structure (similar to a table definition)
val structType: StructType = StructType{
List(
// column name, data type, nullable
StructField("id",IntegerType,false),
StructField("name",StringType,true),
StructField("age",IntegerType,false)
//The columns need to line up with the data, but with a StructType:
/**
* If the schema declares more columns than the data provides, the extra columns are
* expected to hold null; the schema must not declare fewer columns than the data,
* otherwise an exception is thrown, e.g. adding:
* StructField("oop", IntegerType, false)
* StructField("poo", IntegerType, false)
*/
)
}
//Map each split line into a Row
val row: RDD[Row] = lines.map(x => Row(x(0).toInt,x(1),x(2).toInt))
//Convert the RDD[Row] into a DataFrame
val personDF: DataFrame = sqlContext.createDataFrame(row,structType)
personDF.show()
}
}
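A DataFrame built from an explicit StructType can be queried with SQL exactly like the case-class version in Method 1. A minimal sketch, reusing the personDF and sqlContext from the code above (createOrReplaceTempView is the non-deprecated Spark 2.x replacement for registerTempTable):

// Sketch only: assumes the personDF and sqlContext from SparkSql2 above are in scope
personDF.createOrReplaceTempView("t_person")
val adults: DataFrame = sqlContext.sql("select id, name, age from t_person where age > 20 order by id desc")
adults.show()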
Method 3: write the data to MySQL through JDBC
package spark.sql
import java.util.Properties
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}
/**
* Load the data from the file and write it into a table in a MySQL database.
*/
object SparkSql3 {
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
conf.setAppName("SparkSql3")
conf.setMaster("local")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
//Read the data and split each line
val lines: RDD[Array[String]] = sc.textFile("G:/person.txt").map(_.split(" "))
//StructType holding the table schema
val structType: StructType = StructType{
List(
StructField("id",StringType,false),
StructField("name",StringType,true),
StructField("age",IntegerType,false)
)
}
//Map each split line into a Row
val row: RDD[Row] = lines.map(x => Row(x(0).toString,x(1).toString,x(2).toInt))
//Convert the RDD[Row] into a DataFrame
val personDF: DataFrame = sqlContext.createDataFrame(row,structType)
//Connection properties used when writing to MySQL
val prop = new Properties()
prop.put("user","root")
prop.put("password","1234")
prop.put("driver","com.mysql.cj.jdbc.Driver")
//JDBC URL of the MySQL database
val jdbcurl = "jdbc:mysql://localhost/test?characterEncoding=utf-8&serverTimezone=UTC"
//Target table name
val table = "person"
//The database itself must already exist; if the table does not exist it is created automatically
//Write the data out through JDBC
//Properties is backed by a Hashtable
personDF.write.mode("append").jdbc(jdbcurl,table,prop)
println("恭喜冯总喜提拉法一台")
sc.stop()
}
}
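To check the write, the same URL, table name and properties can be used to read the table back into a DataFrame. A minimal sketch, reusing jdbcurl, table, prop and sqlContext from the code above (it would go before sc.stop()):

// Sketch only: read the person table back from MySQL and inspect it
val fromMysql: DataFrame = sqlContext.read.jdbc(jdbcurl, table, prop)
fromMysql.printSchema()
fromMysql.show()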