Spark 写入 Elasticsearch(PySpark + elasticsearch-spark 连接器示例)

[root@hadoop000 bin]# ./spark-submit --master local[2] --jars /root/app/spark-2.3.0-bin-2.6.0-cdh5.7.0/jars/elasticsearch-spark-20_2.11-6.3.0.jar /root/app/spark-2.3.0-bin-2.6.0-cdh5.7.0/python/spark008.py
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import udf


def get_grade(value):
    """Map an hourly PM2.5 reading to an air-quality grade label.

    Args:
        value: PM2.5 concentration. May be None or negative (datasets of this
            kind commonly use -999 as a "missing" sentinel).

    Returns:
        A Chinese grade label string, or None when the reading is missing
        or invalid (None / negative).
    """
    # Bug fix: previously a negative value slipped past the first range
    # check and was wrongly graded "中等"; reject missing/invalid first.
    if value is None or value < 0:
        return None
    if value <= 50:
        return "健康"
    elif value <= 100:
        return "中等"
    elif value <= 150:
        return "对敏感人群不健康"
    elif value <= 200:
        return "不健康"
    elif value <= 300:
        return "非常不健康"
    elif value <= 500:
        return "危险"
    else:
        # value > 500
        return "爆表"

if __name__ == '__main__':
        # Pipeline: read the 2016 hourly PM2.5 CSV (from HDFS, path has no
        # scheme prefix), grade each row's "Value" with a UDF, count rows per
        # grade, derive each grade's percentage, and write the result to
        # Elasticsearch (resource "weather2016/pm").
        spark = SparkSession.builder.appName("project").getOrCreate()
        # "inferSchema" lets Spark derive column types automatically
#       df = spark.read.format("csv").option("header","true").option("inferSchema","true").option("delimiter",",").load("file:///root/data/Beijing_2017_HourlyPM25_created20170803.csv")
        data2016 = spark.read.format("csv").option("header","true").option("inferSchema","true").load("/data/Beijing_2016_HourlyPM25_created20170201.csv")

  #  data2016 = spark.read.format("csv").option("header","true").option("inferSchema","true").load("file:///home/hadoop/data/Beijing_2016_HourlyPM25_created20170201.csv").select("Year","Month","Day","Hour","Value","QC Name")
  #  data2015 = spark.read.format("csv").option("header","true").option("inferSchema","true").load("file:///home/hadoop/data/Beijing_2015_HourlyPM25_created20160201.csv").select("Year","Month","Day","Hour","Value","QC Name")

 #    data2017.show()
 #   data2016.show()
 #   data2015.show()
        # Wrap get_grade as a Spark SQL UDF returning a string
        grade_function_udf = udf(get_grade,StringType())
    # UDF contract: one "Value" in, one "Grade" out
   # group2017 = data2017.withColumn("Grade",grade_function_udf(data2017['Value'])).groupBy("Grade").count()
        group2016 = data2016.withColumn("Grade",grade_function_udf(data2016['Value'])).groupBy("Grade").count()
   # group2015 = data2015.withColumn("Grade",grade_function_udf(data2015['Value'])).groupBy("Grade").count()
   # group2015.select("Grade", "count", group2015['count'] / data2015.count()).show()
        #group2016.select("Grade", "count", group2016['count'] / data2016.count()).show()
   # group2017.select("Grade", "count", group2017['count'] / data2017.count()).show()
        # withColumn adds the percentage column. NOTE(review): "precent" is a
        # typo for "percent", but it is the field name persisted to
        # Elasticsearch — renaming it would change the index mapping.
        result2016 = group2016.select("Grade", "count").withColumn("precent",group2016['count'] / data2016.count()*100)
   # group2017.show()
        #group2016.show()
   # group2015.show()
        #df.show()
#       df.printSchema()
        # Rename "Grade" to lowercase "grade" before writing; "es.nodes"
        # points at the Elasticsearch host:port (hard-coded LAN address),
        # and mode("overwrite") replaces any existing data at weather2016/pm.
        result2016.selectExpr("Grade as grade", "count", "precent").write.format("org.elasticsearch.spark.sql").option("es.nodes","192.168.194.151:9200").mode("overwrite").save("weather2016/pm")

        spark.stop()

在这里插入图片描述

在这里插入图片描述

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

伟伟哦

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值