Maven dependency:
<!-- https://mvnrepository.com/artifact/org.elasticsearch/elasticsearch-spark-20 -->
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch-spark-20_2.11</artifactId>
    <version>6.0.0</version>
</dependency>
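If the build uses sbt instead of Maven, the equivalent coordinate (a sketch, same artifact and version as above) would be:

libraryDependencies += "org.elasticsearch" % "elasticsearch-spark-20_2.11" % "6.0.0"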
Saving an RDD to Elasticsearch
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.rdd.EsSpark

object test {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("appName")
      .config("spark.sql.warehouse.dir", "hdfs://master:9000/usr/hive/warehouse")
      .config("es.nodes", "192.168.123.111:9200,192.168.123.112:9200") // Elasticsearch node addresses
      .config("pushdown", "true") // push query execution down to Elasticsearch so only the needed data is returned; mainly useful when reading
      .config("es.index.auto.create", "true") // create the index automatically if it does not exist
      .config("es.nodes.wan.only", "true")
      .enableHiveSupport()
      .getOrCreate()

    val a = Map("name" -> "lx", "age" -> 20, "tags" -> Array("aaa", "bbb"))
    val b = Map("name" -> "wyq", "age" -> 2, "tags" -> Array("www", "qqq"))
    val d = Map("name" -> "shm", "age" -> 11, "tags" -> Array("sss", "mmm"))
    val rdd = spark.sparkContext.makeRDD(Seq(a, b, d))
    EsSpark.saveToEs(rdd, "index/type")
  }
}
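If the records are already serialized as JSON strings, EsSpark also provides saveJsonToEs, which indexes each string as a document without further conversion. A minimal sketch, reusing the SparkSession above (index/type name assumed):

import org.elasticsearch.spark.rdd.EsSpark

val json = Seq("""{"name":"lx","age":20}""", """{"name":"wyq","age":2}""")
val jsonRdd = spark.sparkContext.makeRDD(json)
EsSpark.saveJsonToEs(jsonRdd, "index/type") // each string is indexed verbatim as one document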
Saving a DataFrame to Elasticsearch
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sql.EsSparkSQL

object test {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("appName")
      .config("spark.sql.warehouse.dir", "hdfs://master:9000/usr/hive/warehouse")
      .config("es.nodes", "192.168.123.111:9200,192.168.123.112:9200") // Elasticsearch node addresses
      .config("pushdown", "true") // push query execution down to Elasticsearch so only the needed data is returned; mainly useful when reading
      .config("es.index.auto.create", "true") // create the index automatically if it does not exist
      .config("es.nodes.wan.only", "true")
      .enableHiveSupport()
      .getOrCreate()

    import spark.sql
    sql("use test")
    val data = sql("select * from test")
    try {
      EsSparkSQL.saveToEs(data, "index/type")
    } catch {
      case ex: Exception => println(ex)
    }
  }
}
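Equivalently, a DataFrame can be written through the DataFrame writer API with the org.elasticsearch.spark.sql data source. A sketch, assuming the same SparkSession configuration and target index as above:

data.write
  .format("org.elasticsearch.spark.sql")
  .mode("append") // append documents to the target index
  .save("index/type")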
Elasticsearch deduplicates documents by the _id field. If no _id is given, Elasticsearch generates one automatically, so duplicate records are not detected. Since you cannot write into _id directly, you can instead designate an existing field as the document _id via es.mapping.id; documents sharing the same value of that field then overwrite each other, which achieves deduplication.
EsSparkSQL.saveToEs(data,es_index+"/"+es_type,Map("es.mapping.id" -> "id"))
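Put together, a minimal sketch, assuming the DataFrame has a column named "id" that uniquely identifies each record:

import org.elasticsearch.spark.sql.EsSparkSQL

import spark.implicits._
val df = Seq((1, "lx"), (2, "wyq"), (1, "lx")).toDF("id", "name")
// es.mapping.id maps the "id" column to the document _id,
// so the two rows with id = 1 end up as a single document.
EsSparkSQL.saveToEs(df, "index/type", Map("es.mapping.id" -> "id"))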
Reading data from Elasticsearch and writing it into another index
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark._
import org.elasticsearch.spark.rdd.EsSpark

import scala.collection.{Map, mutable}

object test_for_spark_es {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("appName")
      .config("spark.sql.warehouse.dir", "hdfs://master:9000/usr/hive/warehouse")
      .config("es.nodes", "192.168.123.111:9200,192.168.123.112:9200") // Elasticsearch node addresses
      .config("pushdown", "true") // push query execution down to Elasticsearch so only the needed data is returned; mainly useful when reading
      .config("es.index.auto.create", "true") // create the index automatically if it does not exist
      .config("es.nodes.wan.only", "true")
      .enableHiveSupport()
      .getOrCreate()

    val query =
      """
      {"query":{"bool":{"must":[{"match_phrase":{"hobbies":"tennis"}}]}}}
      """
    // esRDD returns (documentId, fieldMap) pairs
    val esrdd: RDD[(String, Map[String, AnyRef])] = spark.sparkContext.esRDD("index/type", query)
    val map: RDD[mutable.Map[String, AnyRef]] = esrdd.map { r =>
      val value = r._2
      // The type parameter is [String, AnyRef] because the documents contain
      // object-typed fields, so it must be given explicitly.
      val tag: mutable.Map[String, AnyRef] = mutable.Map("profession" -> "IT")
      tag ++= value // copy every field of the source document alongside the new one
      tag
    }
    try {
      EsSpark.saveToEs(map, "index1/type1")
    } catch {
      case ex: Exception => println(ex)
    }
  }
}
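The same copy can also be done at the DataFrame level with the org.elasticsearch.spark.sql data source, passing the query DSL string through the es.query option. A sketch under the same configuration and index names:

val df = spark.read
  .format("org.elasticsearch.spark.sql")
  .option("es.query", query) // the same query DSL string as above
  .load("index/type")
df.write
  .format("org.elasticsearch.spark.sql")
  .mode("append")
  .save("index1/type1")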