《2020/01/07》Spark code write-up

A quick cleanup of the code I wrote while learning Spark; no theory here.

Scala code

pom file

<properties>
        <encoding>UTF-8</encoding>
        <java.version>1.8</java.version>
        <scala.version>2.11.12</scala.version>
        <scala.binary.version>2.11</scala.binary.version>
        <spark.version>2.4.4</spark.version>
        <es.version>6.6.2</es.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
            <version>8.0.18</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-10_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.elasticsearch</groupId>
            <artifactId>elasticsearch-spark-20_${scala.binary.version}</artifactId>
            <version>${es.version}</version>
        </dependency>

    </dependencies>

    <build>
        <finalName>sparks</finalName>
        <plugins>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.0</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <!--<finalName>sparkjob-${version}-with-lib</finalName>-->
                            <finalName>framework-with-lib</finalName>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                        <exclude>application.properties</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                        </configuration>
                    </execution>
                </executions>
            </plugin>

        </plugins>
    </build>

Code examples

SparkStream
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.elasticsearch.spark.rdd.EsSpark

import scala.collection.mutable

object KafkaData {

  // Read a data stream from files in a directory
  def fileData(fileDir: String): Unit = {
    val sc: SparkConf = new SparkConf().setAppName("FileData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(10))

    // Create a DStream that monitors the directory for newly created files
    val dirStream = context.textFileStream(fileDir)
    val zz = dirStream.flatMap(_.split("_"))
      .map((_, 1)).reduceByKey(_ + _)
    zz.print()

    context.start()
    context.awaitTermination()
  }

  // RDD queue
  def rddData(): Unit = {
    val sc: SparkConf = new SparkConf().setAppName("FileData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(10))

    val rddQueue = new mutable.Queue[RDD[Int]]()
    val dataStream = context.queueStream(rddQueue, oneAtATime = false)
    val zz = dataStream.map((_, 1)).reduceByKey(_ + _)
    zz.print()

    context.start()
    for (i <- 1 to 5) {
      rddQueue += context.sparkContext.makeRDD(1 to 10, 10)
      Thread.sleep(2000)
    }
    context.awaitTermination()
  }

  // Kafka data source // todo
  def kafkaData(): Unit = {
    val sc: SparkConf = new SparkConf().setAppName("KafkaData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(10))

    val kafkaHost = "127.0.0.1:30003,127.0.0.1:30004,127.0.0.1:30005"
    val groupId = "spark"

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> kafkaHost,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("bz_douyin_new")

    val stream = KafkaUtils.createDirectStream[String, String](
      context,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )
    stream.map(record => (record.key, record.value)).print()
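    // Offset-commit sketch: since enable.auto.commit is false above, offsets can be committed
    // back to Kafka after each batch along these lines (illustrative, not wired into this demo):
    // stream.foreachRDD { rdd =>
    //   val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    //   stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    // }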

    context.start()
    context.awaitTermination()
  }

  // Socket data source: data is sent with nc (netcat)
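  // e.g. start a listener with `nc -lk 9999` and type space-separated words into it (assumed local setup)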
  def netData(): Unit = {
    val sc: SparkConf = new SparkConf().setAppName("KafkaData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(5))

    val socket = context.socketTextStream("127.0.0.1", 9999)
    val zz = socket.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    zz.print()

    context.start()
    context.awaitTermination()
  }

  // Connect to ES
  def esData(): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("esData").setMaster("local[*]")
    sparkConf.set("cluster.name", "es")
    sparkConf.set("es.index.auto.create", "true")
    sparkConf.set("es.nodes", "127.0.0.1")
    sparkConf.set("es.port", "30006")
    sparkConf.set("es.index.read.missing.as.empty","true")
//    sparkConf.set("es.net.http.auth.user", "elastic") //访问es的用户名
//    sparkConf.set("es.net.http.auth.pass", "changeme") //访问es的密码
    sparkConf.set("es.nodes.wan.only","true")
    val sc = new SparkContext(sparkConf)

    val rdd = EsSpark.esRDD(sc, "tuerqi_kuaishou/items", "?q=*")
    println(rdd.count())
    rdd.foreach(line => {
      val key = line._1
      val value = line._2

      for (tmp <- value) {
        val key1 = tmp._1
        val value1 = tmp._2
      }
    })
  }

  def main(args: Array[String]): Unit = {
    val fileDir = "F:\\documents\\sparks\\zz"

    esData()
  }

}
SparkTable
import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}
object FaceTable {

  def getDataFromJson(): Unit = {
    val spark = SparkSession.builder()
      .appName("spark mysql")
      .master("local")
      .getOrCreate()

    import spark.implicits._

//    val df = spark.read.json("F:\\user.json")
//    df.show()

//    val df = spark.read
//        .option("multiLine", true).option("mode", "PERMISSIVE")
//        .json("F:\\user.json")
    val df = spark.read.json("F:\\user.json")
    df.show()
    df.filter($"age" > 21).show()
    // Create a temporary view
    df.createOrReplaceGlobalTempView("users_tt")
    spark.sql("select * from global_temp.users_tt").show()
//    df.createTempView("users_tt")
//    spark.sql("select * from users_tt").show()

    spark.close()
  }

  // Create a DataFrame from a case class
  def test2(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkSql").setMaster("local[*]")
    val sc: SparkSession = SparkSession.builder().config(conf)
      // .enableHiveSupport()
      .getOrCreate()

    val peopleRdd = sc.sparkContext.textFile("file:\\F:\\documents\\sparks\\persons.txt")
      .map(line => Person(line.split(",")(0), line.split(",")(1).toInt))
    import sc.implicits._
    // Convert the RDD to a DataFrame
    val df: DataFrame = peopleRdd.toDF
    df.createOrReplaceTempView("persons")
    sc.sql("select * from persons").show()
    sc.close()
  }

  // Method 2: create a DataFrame with a StructType (programmatic interface)
  def test3(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkSql").setMaster("local[*]")
    val sc: SparkSession = SparkSession.builder().config(conf)
      .getOrCreate()
    val peopleRdd = sc.sparkContext.textFile("file:\\F:\\documents\\sparks\\persons.txt")
    import org.apache.spark.sql.Row
    val rowRdd = peopleRdd.map(line => {
      val fields = line.split(",")
      Row(fields(0), fields(1).trim.toInt)
    })

    val structType: StructType = StructType(
      // field name, field type, nullable
      StructField("name", StringType, true) ::
        StructField("age", IntegerType, true) :: Nil
    )
    val df: DataFrame = sc.createDataFrame(rowRdd, structType)
    df.createOrReplaceTempView("persons")
    sc.sql("select * from persons").show()
    sc.close()
  }

  // MySQL method 1: no query predicates.
  // All rows of the table are handled by a single RDD partition; for a large table this can easily cause an OOM.
  def getFaceTableInfo(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkSql").setMaster("local[*]")
    val sc: SparkSession = SparkSession.builder().config(conf)
      .getOrCreate()

    val url = "jdbc:mysql://127.0.0.1:10131/faces?"
    val table = "za_person"
    val prop = new Properties()
    prop.setProperty("user", "root")
    prop.setProperty("password", "123456")
    prop.setProperty("driver", "com.mysql.jdbc.Driver")

    // Pass in the MySQL URL, the table name, and a Properties object (DB username and password)
    val df: DataFrame = sc.read.jdbc(url, table, prop)
    println(df.count())
    println(df.rdd.partitions.length) // 1
    df.createOrReplaceTempView("staff")
    sc.sql("select * from staff where id <=2").show()
    sc.stop()
  }

  /**
    * MySQL method 2: partition by a range of a database column.
    * lowerBound and upperBound define the partitioning range,
    * columnName selects the partition column (integral types only),
    * numPartitions sets the number of partitions (should not be too large).
    */
  def mysql2(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkMysql2").setMaster("local[*]")
    val sc: SparkSession = SparkSession.builder().config(conf)
      .getOrCreate()

    val lowerBound = 1
    val upperBound = 2
    val numPartitions = 3
    val url = "jdbc:mysql://192.168.109.132:3306/pehsys?user=root&password=123456"
    val prop = new Properties()

    val df: DataFrame = sc.read.jdbc(url, "pehsys_person", "id", lowerBound, upperBound, numPartitions, prop)
    df.createOrReplaceTempView("person")
    println(df.count())
    println(df.rdd.partitions.length)
    sc.sql("select * from person").show()
    sc.close()
  }

  /**
    * MySQL method 3: partition by arbitrary predicates.
    * predicates splits the data into 2 partitions by id.
    */
  def mysql3(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkMysql3").setMaster("local[*]")
    val sc: SparkSession = SparkSession.builder().config(conf)
      .getOrCreate()

    val url = "jdbc:mysql://192.168.109.132:3306/pehsys?user=root&password=123456"
    val prop = new Properties()
    val predicates = Array[String]("id <= 2", "id > 1 and id < 3")
    val df: DataFrame = sc.read.jdbc(url, "pehsys_person", predicates, prop)
    println(df.rdd.partitions.length)
    df.createOrReplaceTempView("person")
    sc.sql("select * from person").show()
    sc.close()
  }

  def mysqlTest(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkSql").setMaster("local[*]")
    val sc: SparkSession = SparkSession.builder().config(conf)
      .getOrCreate()
    val url = "jdbc:mysql://192.168.109.132:3306/pehsys?user=root&password=123456"
    val prop = new Properties()
    val tableName = "pehsys_person"

    import sc.implicits._

    val df: DataFrame = sc.read.jdbc(url, tableName, prop)
    val result = df.rdd.map(rowData => {
      val similarity = scala.util.Random.nextInt(100)
      (rowData(0), similarity)
    }).sortBy(_._2, false)

    val zz = result.collect()
    sc.close()
  }

  // Efficiency and caching
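  // A minimal caching sketch (illustrative names and values): cache a DataFrame that is reused by
  // several actions, then release it once it is no longer needed.
  def cacheData(): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("CacheDemo").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    import spark.implicits._

    val df: DataFrame = Seq(("aa", 90), ("bb", 75), ("cc", 60)).toDF("name", "score")
    df.cache() // or df.persist(StorageLevel.MEMORY_AND_DISK)

    println(df.count())                       // the first action materializes the cache
    println(df.filter($"score" > 70).count()) // later actions reuse the cached data instead of recomputing

    df.unpersist()
    spark.close()
  }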

  def main(args: Array[String]): Unit = {
    mysqlTest()
  }

}

case class Person(var name: String, var age: Int)
Window Operations
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Stateful transformations
  */
object DataWindow {

  // UpdateStateByKey
  def updateData(): Unit = {
    // State-update function: values holds the word counts of the current batch, state the accumulated count from previous batches
    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
      val currentCount = values.foldLeft(0)(_ + _)
      val previousCount = state.getOrElse(0)
      Option(currentCount + previousCount)
    }

    val sc: SparkConf = new SparkConf().setAppName("KafkaData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(5))
    context.checkpoint(".")

    val socket = context.socketTextStream("127.0.0.1", 9999)
    val zz: DStream[(String, Int)] = socket.flatMap(_.split(" ")).map((_, 1))
    val zzDStram = zz.updateStateByKey[Int](updateFunc)
    zzDStram.print()

    context.start()
    context.awaitTermination()
  }

  // Window Operations
  /**
    * Window-based operations combine multiple batches to compute a result over a time range
    * longer than the StreamingContext batch interval.
    * They take two parameters, the window duration and the slide duration, both of which must be
    * multiples of the StreamingContext batch interval.
    * The slide duration defaults to the batch interval.
    */
  def windowData(): Unit = {
    val sc: SparkConf = new SparkConf().setAppName("KafkaData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(5))
    context.checkpoint(".")

    val socket = context.socketTextStream("127.0.0.1", 9999)
    val zz: DStream[(String, Int)] = socket.flatMap(_.split(" ")).map((_, 1))
    val zzDStream = zz.reduceByKeyAndWindow((a: Int, b: Int) => (a + b), Seconds(15), Seconds(10))
    zzDStream.print()

    context.start()
    context.awaitTermination()
  }

  // 4.3 Other important operations: Transform and Join
  def transformData(): Unit = {
    val sc: SparkConf = new SparkConf().setAppName("KafkaData").setMaster("local[*]")
    val context: StreamingContext = new StreamingContext(sc, Seconds(5))

    val wordRdd = context.sparkContext.makeRDD(Array("aa", "a", "z")).map((_, 1))

    val socket = context.socketTextStream("127.0.0.1", 9999)
    val zz = socket.flatMap(_.split(" ")).map((_, 1))
    val joinData = zz.transform {
      rdd => rdd.++(wordRdd)
    }

    joinData.reduceByKey(_ + _).print()
    context.start()
    context.awaitTermination()
  }

  //



  def main(args: Array[String]): Unit = {
    transformData()
  }
}

Java code

pom file

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.0.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>cn.lhcz</groupId>
    <artifactId>mars</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>mars</name>
    <description>Demo project for Spring Boot</description>

    <properties>
        <java.version>1.8</java.version>
        <HikariCP.version>3.3.1</HikariCP.version>
        <mybatis.spring.boot.version>2.0.0</mybatis.spring.boot.version>
        <okhttp.version>3.13.1</okhttp.version>
        <json.version>2.3</json.version>
        <jna.version>3.0.9</jna.version>
        <httpcore.version>4.4.12</httpcore.version>
        <httpmime.version>4.5.10</httpmime.version>
        <scala.binary.version>2.11</scala.binary.version>
        <spark.version>2.4.4</spark.version>
        <es.version>6.6.2</es.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-jdbc</artifactId>
        </dependency>

        <!--redis-->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>redis.clients</groupId>
                    <artifactId>jedis</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>io.lettuce</groupId>
                    <artifactId>lettuce-core</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <!-- Jedis client -->
        <dependency>
            <groupId>redis.clients</groupId>
            <artifactId>jedis</artifactId>
        </dependency>

        <!-- commons-pool2, required by the Spring Boot 2.x Redis integration when using Jedis -->
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-pool2</artifactId>
            <version>2.5.0</version>
        </dependency>

        <!--mysql-->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
            <version>8.0.18</version>
        </dependency>

        <!-- database connection pool -->
        <dependency>
            <groupId>com.zaxxer</groupId>
            <artifactId>HikariCP</artifactId>
            <version>${HikariCP.version}</version>
        </dependency>

        <!-- MyBatis -->
        <dependency>
            <groupId>org.mybatis.spring.boot</groupId>
            <artifactId>mybatis-spring-boot-starter</artifactId>
            <version>${mybatis.spring.boot.version}</version>
        </dependency>

        <!--okhttp-->
        <dependency>
            <groupId>com.squareup.okhttp3</groupId>
            <artifactId>okhttp</artifactId>
            <version>${okhttp.version}</version>
        </dependency>


        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpcore</artifactId>
            <version>${httpcore.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpmime</artifactId>
            <version>${httpmime.version}</version>
        </dependency>

        <!-- JSON library -->
        <dependency>
            <groupId>net.sf.json-lib</groupId>
            <artifactId>json-lib</artifactId>
            <version>${json.version}</version>
            <classifier>jdk15</classifier>
        </dependency>

        <dependency>
            <groupId>com.sun.jna</groupId>
            <artifactId>jna</artifactId>
            <version>${jna.version}</version>
        </dependency>

        <!-- ES dependency -->
        <dependency>
            <groupId>org.elasticsearch.client</groupId>
            <artifactId>elasticsearch-rest-high-level-client</artifactId>
            <version>${es.version}</version>
        </dependency>

        <!-- Spark dependencies -->
        <!-- https://mvnrepository.com/artifact/org.apache.spark/spark-core -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.elasticsearch</groupId>
            <artifactId>elasticsearch-spark-20_${scala.binary.version}</artifactId>
            <version>${es.version}</version>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.junit.vintage</groupId>
                    <artifactId>junit-vintage-engine</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>

Code

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.sql.SparkSession;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.spark.rdd.api.java.JavaEsSpark;
import scala.Tuple2;

import java.util.*;
import java.util.function.Function;

public class YqEsSpark {

    private static String COMMENT_INDEX = "tuerqi_kuaishou/items";

    private static String ES_HOST = "127.0.0.1";
    private static String ES_PORT = "9200";

    // Write data to ES
    public static void writeEs() {
        SparkConf sparkConf = new SparkConf().setAppName("commentData").setMaster("local[*]")
                .set("es.index.auto.create", "true")
                .set("es.nodes", ES_HOST)
                .set("es.port", ES_PORT)
                .set("es.index.read.missing.as.empty","true")
                .set("es.nodes.wan.only", "true");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());

        Map<String, ?> map1 = ImmutableMap.of("one", 1, "two", 2);
        Map<String, ?> map2 = ImmutableMap.of("first", "guo", "second", "xiaozhong");
        JavaRDD<Map<String, ?>> rddData = jsc.parallelize(ImmutableList.of(map1, map2));
        String indexName = "test/doc";
        JavaEsSpark.saveToEs(rddData, indexName);

        sparkSession.close();
    }

    // Read data from ES
    public static void readEs() {
        SparkConf sparkConf = new SparkConf().setAppName("commentData").setMaster("local[*]")
                .set("es.index.auto.create", "true")
                .set("es.nodes", ES_HOST)
                .set("es.port", ES_PORT)
                .set("es.index.read.missing.as.empty","true")
                .set("es.nodes.wan.only", "true");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());

        String queryStr = "?q=*";
        // (IDE shortcut Ctrl+Alt+V: extract variable)
        JavaRDD<Map<String, Object>> values = JavaEsSpark.esRDD(jsc, COMMENT_INDEX, queryStr).values();
        for (Map<String, Object> item: values.collect()) {
            item.forEach((key, value) -> {
                System.out.println("key:    " + key + "     value:  " + value.toString());
            });
        }

        sparkSession.close();
    }

    // Aggregate statistics over the data
    public static void commentData() {
        SparkConf sparkConf = new SparkConf().setAppName("commentData").setMaster("local[*]")
                .set("es.index.auto.create", "true")
                .set("es.nodes", ES_HOST)
                .set("es.port", ES_PORT)
                .set("es.index.read.missing.as.empty","true")
                .set("es.nodes.wan.only", "true");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());

        String queryStr = "?q=*";

        // (IDE shortcut Ctrl+Alt+V: extract variable)
        JavaRDD<Map<String, Object>> values = JavaEsSpark.esRDD(jsc, COMMENT_INDEX, queryStr).values();

        JavaRDD<Map<String, Integer>> zz = values.map(item -> {
            String userId = (String) item.get("user_id");
            return ImmutableMap.of(userId, 1);
        });

        JavaPairRDD<String, Integer> pairRDD = values.mapToPair(item -> {
            String userId = (String) item.get("user_id");
            return new Tuple2<>(userId, 1);
        });

        JavaPairRDD<String, Integer> keyRdd = pairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer integer, Integer integer2) throws Exception {
                return integer + integer2;
            }
        });

        pairRDD.reduceByKey((Integer integer, Integer integer2) -> {
            return integer + integer2;
        });

        pairRDD.reduceByKey((Integer a, Integer b) -> a+b);

        List<Tuple2<String, Integer>> result = keyRdd.sortByKey().collect();
        System.out.println(result.size());
        // Narrow dependencies vs. wide dependencies
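        // map/mapToPair produce narrow dependencies; reduceByKey and sortByKey shuffle data across partitions (wide dependencies).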
        sparkSession.close();
    }

    public static void aa() {
        Function<Map<String, String>, Boolean> zz = item -> item.keySet().contains("11");

        Map<String, String> map = new HashMap<>();
        map.put("11", "12");

        Function<Map<String, String>, Map<String, Integer>> mapFunc = item -> {
          if (item.containsKey("11")) {
              return ImmutableMap.of(item.get("11"), 1);
          }
          return null;
        };

        Object result = mapFunc.apply(map);
        System.out.println(1);

        new Thread(() -> {
            System.out.println(1);
        }).start();

        Tuple2<String, Map<String, String>> aa = new Tuple2<>("zz", map);
        Map<String, String>  aamap = aa._2;
        System.out.println(aamap.size());
    }

    // Which ordinary accounts have the followed accounts commented on, and which are the top ones
    public static void zzData(JavaSparkContext jsc) {
        String queryStr = "?q=*";
        // (IDE shortcut Ctrl+Alt+V: extract variable)
        // First query the comments posted by the followed accounts
        JavaRDD<Map<String, Object>> values = JavaEsSpark.esRDD(jsc, COMMENT_INDEX, queryStr).values();

        Set<String> accountSet = new HashSet<>();
        Set<String> douyinhaoSet = new HashSet<>();

        JavaRDD<Map<String, Object>> filterRdd = values.filter(item -> {
            String videoUserid = (String) item.get("video_userid");
            String videoDouyinhao = (String) item.get("video_douyinhao");
            // Check whether the video publisher is a followed account
            if (accountSet.contains(videoUserid) || douyinhaoSet.contains(videoDouyinhao)) {
                return false;
            }
            return true;
        });

        JavaPairRDD<String, String> pairRDD = filterRdd.mapToPair(item -> {
            String userId = (String) item.get("user_id");
            String videoUserid = (String) item.get("videoUserid");
            return new Tuple2<>(userId, videoUserid);
        });

        JavaPairRDD<String, String> distinctRdd = pairRDD.distinct();
        Map<String, Long> countMap = distinctRdd.countByKey();
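        // countMap: for each commenting account, the number of distinct ordinary accounts it has commented on;
        // sorting these counts in descending order gives the "top" accounts mentioned above.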

    }

    // Count active accounts
    public static void getAccountCount(JavaSparkContext jsc) {
        String indexName = "test/doc";

        SearchSourceBuilder builder = new SearchSourceBuilder();
        builder.query(QueryBuilders.termQuery("one", "1"));
        builder.size(1);
        String queryStr = builder.toString();
        // Query comments posted within a recent time window (e.g. two weeks)
        JavaRDD<Map<String, Object>> values = JavaEsSpark.esRDD(jsc, indexName, queryStr).values();
        JavaRDD<Long> userIds = values.map(item -> {
            long userId = (long) item.get("one");
            return userId;
        });

        // Action: triggers the actual computation
        long count = userIds.distinct().count();
        System.out.println(count);
    }


    public static void cc(JavaSparkContext jsc) {
        String indexName = "comment_bayingol_new/newcomment";
        SearchSourceBuilder builder = new SearchSourceBuilder();
        int countDay = 14;
        long endTime = System.currentTimeMillis() / 1000;
        long beginTime = endTime - countDay * 24 * 60 * 60;

        BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
        boolQueryBuilder.must(QueryBuilders.termsQuery("user_id", "2748415486724619", "63091880486"))
                .filter(QueryBuilders.rangeQuery("comment_time").lte(endTime).gte(beginTime));
        builder.query(boolQueryBuilder);
        String queryStr = builder.toString();
        JavaRDD<Map<String, Object>> values = JavaEsSpark.esRDD(jsc, indexName, queryStr).values();
        long count = values.count();
        System.out.println(count);
    }

    public static void bb() {
        SparkConf sparkConf = new SparkConf().setAppName("bb").setMaster("local[*]");
        JavaSparkContext context = new JavaSparkContext(sparkConf);
        List<Integer> list = Arrays.asList(1, 2, 3, 4, 5);
        JavaRDD<Integer> rddData = context.parallelize(list, 2);
        JavaRDD<Integer> mapRdd = rddData.map(item -> item+1);
        List<Integer> zz = mapRdd.collect();
        System.out.println(1);
        context.close();
    }



    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("commentData").setMaster("local[*]")
                .set("es.index.auto.create", "true")
                .set("es.nodes", "127.0.0.1")
                .set("es.port", "9200")
                .set("es.index.read.missing.as.empty","true")
                .set("es.nodes.wan.only", "true");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());
        cc(jsc);
        jsc.close();

    }


}