1. PySpark基础数据处理总结

本文主要介绍 PySpark 的基础使用,涵盖 Hive 读写封装、数据源转换、RDD 操作和 DataFrame 操作。

Spark本机测试环境:/Users/mtdp/work/code/machinelearning/src/common/bigdata/document

Spark API: PySpark Overview — PySpark 3.5.0 documentation

代码:

  • /Users/mtdp/work/code/machinelearning/src/common/bigdata/spark_dataframe_utils.py

  • /Users/mtdp/work/code/machinelearning/src/common/bigdata/spark_utils.py

PySpark美团平台使用:

  • 本地调试:可以通过 hope 将本地编写的代码提交到集群中运行

外部参考:

Pycharm 配置参数

SPARK_HOME /Users/tangchao/work/bigdata/tools/spark-2.2.1-bin-hadoop2.7
PYTHONPATH /Users/tangchao/work/bigdata/tools/spark-2.2.1-bin-hadoop2.7/python
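
也可以不依赖 IDE,在脚本开头手动设置这两个环境变量(仅为示意,路径以本机实际安装位置为准,py4j 的 zip 包版本随 Spark 发行版不同):

# 手动设置 SPARK_HOME / PYTHONPATH 的简单示意
import os
import sys

os.environ["SPARK_HOME"] = "/Users/tangchao/work/bigdata/tools/spark-2.2.1-bin-hadoop2.7"
sys.path.insert(0, os.path.join(os.environ["SPARK_HOME"], "python"))
# 如有需要,再把 py4j 的 zip 包加入路径,例如:
# sys.path.insert(0, os.path.join(os.environ["SPARK_HOME"], "python/lib/py4j-0.10.4-src.zip"))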

spark封装代码:

# -*- coding: UTF-8 -*-
import json

import safety_dispatch.common.common_constant as const

from safety_dispatch.common.utils.date_utils import DateUtilsHelper

from pyspark.sql.functions import udf, when, split, lit, explode, concat, concat_ws
from pyspark.sql.types import StringType, LongType, FloatType
from pyspark.sql.types import StructField, StructType


class HiveUtilsHelper:
    # spark data frame 数据类型字段
    SQL_TYPE_DICT = {
        "string": StringType(),
        "bigint": LongType(),
        "float": FloatType(),
        "double": FloatType()
    }

    @staticmethod
    def _read_sql_file_to_str(file_path):
        """
            读取文件中的sql代码
        :param file_path: sql文件路径
        :return: 返回sql字符串
        """
        with open(file_path) as f_read:
            sql_str = f_read.read()
        return sql_str

    @staticmethod
    def get_table_columns_info(hive_ctx, table_name):
        """
            获取表的字段信息
        :param hive_ctx:
        :param table_name: 表名
        :return: 字段信息,数组形式 [[column1, type1], [column2, type2]]
        """
        sql_str = """
               select *
               from {}
               limit 5
           """.format(table_name)
        print(sql_str)
        data = hive_ctx.sql(sql_str)

        columns_info = [list(info) for info in data.dtypes]
        print("table_name : {}, columns_info".format(table_name, columns_info))
        return columns_info

    @staticmethod
    def pandas_to_hive(hive_ctx, pandas_df, table_name, dt, is_overwrite=True):
        data = hive_ctx.createDataFrame(pandas_df)
        temp_table_name = "tempTable"
        data.registerTempTable(temp_table_name)

        insert_model = "into"
        if is_overwrite:
            insert_model = "overwrite"
        # 写入hive
        insert_sql_str = """
                        insert {insert_model} table {table_name} partition (dt = \"{dt}\")
                        select * from {temp_table_name}
                        """.format(table_name=table_name, insert_model=insert_model,
                                   dt=dt, temp_table_name=temp_table_name)
        print("执行sql插入数据 :\n\t{}".format(insert_sql_str))
        hive_ctx.sql(insert_sql_str)
        print("写入hive 完成")

    @staticmethod
    def hive_to_hive(hive_ctx, table_name, dt=None, sql_file_path=None, sql_str=None, is_overwrite=True):
        """
            执行sql并保存到hive表中
        :param hive_ctx:
        :param table_name: 保存的表名
        :param dt: 时间
        :param sql_file_path: sql文件
        :param sql_str: sql 字符串
        :return:
        """
        print("执行sql并保存到文件中", table_name, dt)
        # 判断从文件中加载sql
        if sql_file_path:
            sql_str = HiveUtilsHelper._read_sql_file_to_str(sql_file_path)

        # 判断是否生成时间
        if dt is None:
            # 默认为昨天到日期
            dt = DateUtilsHelper.get_date_interval_day(time_delta=-1, str_format=const.DT_DATE_FORMAT)

        # 注册UDF
        hive_ctx.sql("CREATE TEMPORARY FUNCTION projHex AS 'com.sankuai.meituan.hive.udf.proj2Hex'")

        # 执行sql加载数据
        print("执行sql加载数据 :\n\t{}".format(sql_str))
        data = hive_ctx.sql(sql_str)

        # 注册临时表
        temp_table_name = "tempTable_{}".format(table_name.replace(".", ""))
        data.registerTempTable(temp_table_name)

        print(data.columns)
        columns = ",".join([column for column in data.columns if column != "dt"])
        print(columns)

        insert_model = "into"
        if is_overwrite:
            insert_model = "overwrite"
        # 写入hive
        insert_sql_str = """
                insert {insert_model} table {table_name} partition (dt = \"{dt}\")
                select {columns} from {temp_table_name}
                """.format(table_name=table_name, insert_model=insert_model,
                           columns=columns,
                           dt=dt, temp_table_name=temp_table_name)
        print("执行sql插入数据 :\n\t{}".format(insert_sql_str))
        hive_ctx.sql(insert_sql_str)
        print("写入hive 完成")

    @staticmethod
    def hive_to_csv(hive_ctx, output_path, sql_file_path=None, sql_str=None):
        """
            执行sql并保存到csv表中
        :param hive_ctx:
        :param output_path: 保存的路径
        :param sql_file_path: sql文件路径
        :param sql_str:
        :return:
        """
        print("执行sql并保存到文件中", output_path, sql_file_path)
        # 判断从文件中加载sql
        if sql_file_path:
            sql_str = HiveUtilsHelper._read_sql_file_to_str(sql_file_path)
            print(sql_str)

        # 执行sql加载数据
        print("执行sql加载数据 :\n\t{}".format(sql_str))
        hive_ctx.sql("CREATE TEMPORARY FUNCTION projHex AS 'com.sankuai.meituan.hive.udf.proj2Hex'")
        data = hive_ctx.sql(sql_str)

        print("执行保存操作,将数据保存至: \n\t{}".format(output_path))
        data.coalesce(1).write.mode('overwrite').option("delimiter", "\t").option("header", "true").csv(output_path)

        print("写入 csv 完成")

    @staticmethod
    def hive_to_export_platform(hive_ctx, in_table_name, out_table_name, id_columns, content_columns, dt):
        """
            将hive表数据写入到湾流中
        :param in_table_name: str, hive表
        :param out_table_name: str, 湾流表名
        :param id_columns: list,
                输出湾流表的id, 如[column1, column2,column3]
                如果hive表和湾流表id不一致, hive表中的字段为[hexhash_id, hour], 而湾流表为[hex_id, time_id]
                    则传入 [hexhash_id:hour, hour:time_id]
        :param content_columns: 内容字段,如果数组长度为空,则默认为除id和dt之外的所有字段
        :param dt: str, 分区的时间标志
        :return:
        """
        load_sql_str = """
               select *
               from {}
               where dt = "{}"
           """.format(in_table_name, dt)

        print("执行sql加载数据 :\n\t{}\n".format(load_sql_str))

        # 加载数据并转换为临时表
        dataframe = hive_ctx.sql(load_sql_str)

        # 是否需要重命名: id_columns 中形如 "hive字段:湾流字段" 的项,需要把hive字段重命名为湾流字段
        rename_columns = {}
        for index, column in enumerate(id_columns):
            print(index, column)
            if ":" in column:
                in_column = column.split(":")[0]
                out_column = column.split(":")[1]
                rename_columns[in_column] = out_column
        # 重命名,并把 id_columns 统一为湾流表字段名
        for in_column, out_column in rename_columns.items():
            dataframe = dataframe.withColumnRenamed(in_column, out_column)
        id_columns = [column.split(":")[1] if ":" in column else column for column in id_columns]

        # 先转换为湾流表结构,再写入湾流表
        out_columns_info = HiveUtilsHelper.get_table_columns_info(hive_ctx, out_table_name)
        result_df = HiveUtilsHelper.dataframe_convert_platform(hive_ctx, dataframe, id_columns,
                                                               out_columns_info, content_columns)
        HiveUtilsHelper.save_to_export_platform(hive_ctx, result_df, out_table_name, dt)

    @staticmethod
    def dataframe_convert_platform(hive_ctx, dataframe, id_columns, out_columns_info, content_columns=None):
        """
        :param hive_ctx:
        :param dataframe:
        :param id_columns: list,
                输出湾流表的id, 如[column1, column2,column3]
        :param out_columns_info: 湾流表字段信息
        :param content_columns: 内容字段,如果为数组长度为空,则默认为除id和dt之外的所有字段
        :return:
        """
        if not content_columns:
            content_columns = []
        print("将dataframe数据写入到湾流中")
        print("id_columns = {}".format(id_columns))
        print("content_columns = {}".format(content_columns))

        float_columns = []
        int_columns = []
        str_columns = []

        # 获取输入表各类型字段信息
        for column_name, dtype in dataframe.dtypes:
            # 判断是否为输出的字段,如果输出字段列表为空,则默认为除ID和分区字段外的全部字段
            if column_name != const.PARTITION_COLUMN \
                    and column_name not in id_columns \
                    and (len(content_columns) == 0
                         or column_name in content_columns):
                print(column_name, str(dtype))
                if 'int' in str(dtype):
                    int_columns.append(column_name)
                elif 'float' in str(dtype) \
                        or 'double' in str(dtype):
                    float_columns.append(column_name)
                else:
                    str_columns.append(column_name)

        print("\nint_columns = ", int_columns)
        print("float_columns = ", float_columns)
        print("str_columns = ", str_columns)

        def collect_feature_udf(row):
            """
                组成特征的UDF
            :param row: RDD的row
            :return:
            """
            int_data_map = {}
            float_data_map = {}
            str_data_map = {}

            # 按数据类型分别收集字段值,组成形如 {"A": "1", "B": "2"} 的map
            for column in int_columns:
                int_data_map[column] = str(row[column])
            for column in float_columns:
                float_data_map[column] = str(row[column])
            for column in str_columns:
                str_data_map[column] = str(row[column])

            result = []
            for column, dtype in out_columns_info:
                if column in id_columns:
                    value = row[column]
                    result.append(const.DATA_TYPE_DICT[dtype](value))
                elif "int" in column:
                    result.append(json.dumps(int_data_map))
                elif "float" in column:
                    result.append(json.dumps(float_data_map))
                elif "str" in column:
                    result.append(json.dumps(str_data_map))

            return result

        # 转化为可以输入到湾流表的形式
        result_rdd = dataframe.rdd.map(lambda row: collect_feature_udf(row))

        struct_type_list = []
        out_columns = []
        for column, dtype in out_columns_info:
            if column == const.PARTITION_COLUMN:
                continue
            if column in id_columns:
                struct_field = StructField(column, HiveUtilsHelper.SQL_TYPE_DICT[dtype])
            else:
                struct_field = StructField(column, StringType())
            struct_type_list.append(struct_field)
            out_columns.append(column)
        schema = StructType(struct_type_list)
        print("schema = ", schema)

        result_df = hive_ctx.createDataFrame(result_rdd, schema=schema)
        # print(result_df.count())
        # print(result_df.show())

        return result_df

    @staticmethod
    def save_to_export_platform(hive_ctx, dataframe, table_name, dt):
        """
            将dataframe数据写入到湾流中
        :param table_name: str, 湾流表名
        :param dataframe: str, 湾流表名
        :param dt: str 分区的时间标志
        :return:

        案例 如
            传参数
        """
        # 生成湾流表 dataframe 字段
        # 注册临时表
        feature_table_name = "to_export_platform_table"
        dataframe.registerTempTable(feature_table_name)

        out_columns = dataframe.columns
        print("out_columns = ", out_columns)
        # 输出到湾流表名
        sql_str = """
                   insert overwrite table {out_table_name} partition (dt=\"{dt}\")
                   select {out_columns}
                     from {feature_table_name}
                   """.format(out_table_name=table_name, dt=dt, out_columns=",".join(out_columns),
                              feature_table_name=feature_table_name)

        print("\n输出到湾流表的sql : \n" + sql_str)
        hive_ctx.sql(sql_str)
        print("\n输出到湾流表完成")

    @staticmethod
    def dataframe_to_hive(hive_ctx, dataframe, table_name, dt):
        """
            将dataframe数据写入到湾流中
        :param table_name: str, 湾流表名
        :param dataframe: str, 湾流表名
        :param dt: str 分区的时间标志
        :return:

        案例 如
            传参数
        """
        # 生成湾流表 dataframe 字段
        # 注册临时表
        table_columns = HiveUtilsHelper.get_table_columns_info(hive_ctx, table_name)

        out_columns = []
        for column, dtype in table_columns:
            if column != 'dt':
                if column not in dataframe.columns:
                    if "str" in str(dtype):
                        dataframe = dataframe.withColumn(column, lit(""))
                    elif "int" in str(dtype):
                        dataframe = dataframe.withColumn(column, lit(0))
                    elif "float" in str(dtype):
                        dataframe = dataframe.withColumn(column, lit(0.0))

                out_columns.append(column)

        print("out_columns = ", out_columns)

        feature_table_name = "to_hive_table"
        dataframe.registerTempTable(feature_table_name)

        # 组装输出到hive表的sql
        sql_str = """
                       insert overwrite table {out_table_name} partition (dt=\"{dt}\")
                       select {out_columns}
                         from {feature_table_name}
                       """.format(out_table_name=table_name, dt=dt, out_columns=",".join(out_columns),
                                  feature_table_name=feature_table_name)

        print("\n输出到湾流表的sql : \n" + sql_str)
        hive_ctx.sql(sql_str)
        print("\n输出到湾流表完成")

一、spark 基础

1.1 创建 SparkContext:

from pyspark import HiveContext, SQLContext, SparkConf, SparkContext
from pyspark import StorageLevel

conf = SparkConf()
conf.set("spark.driver.maxResultSize", "16G")
conf.set("spark.default.parallelism", 200)
sc = SparkContext(conf=conf)
sc.setSystemProperty('spark.driver.maxResultSize', '10g')
# 设置日志输出
#sc.setLogLevel("ERROR")

1.2 创建 SparkSession

API: http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession

参考:SparkSession的三种创建方式_sparksession.builder-CSDN博客

from pyspark import RDD
from pyspark import StorageLevel
from pyspark.sql import SparkSession
from pyspark.sql import DataFrame
from pyspark.sql.types import DoubleType, StringType, IntegerType, FloatType
from pyspark.sql.types import StructField
from pyspark.sql.types import StructType
from pyspark.sql.functions import date_format, to_timestamp
from pyspark.sql.functions import split, regexp_replace

spark_session = SparkSession.builder.appName(app_name)
spark_session = spark_session.master(master)
spark_session = spark_session.config('spark.executor.memory', spark_executor_memory)
for key, value in config_map.items():
    spark_session = spark_session.config(key, value)

# spark_session = spark_session.getOrCreate()
# spark_session = spark_session.enableHiveSupport().getOrCreate()
from pyspark import RDD
from pyspark import StorageLevel
from pyspark.sql import SparkSession
from pyspark.sql import DataFrame

# /Users/mtdp/work/code/machinelearning/src/common/bigdata/spark_utils.py
app_name = "spark 测试"
spark_session = SparkSession.builder.appName(app_name)
spark_session = spark_session.getOrCreate()

1.3 创建 HiveContext

API:http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.HiveContext

conf = SparkConf()
# 先创建 SparkContext
sc = SparkContext(conf=conf)
hive_ctx = HiveContext(sc)

# 注册计算hexhash 的 UDF
hive_ctx.sql("CREATE TEMPORARY FUNCTION projHex AS 'com.sankuai.meituan.hive.udf.proj2Hex'")

二、数据源转换

Spark 可以并行化处理的数据集主要有 RDD 和 DataFrame 两种。

数据来源: list, dict, csv, mysql, hbase, hive等等

数据输出: list, csv, mysql, hbase, hive等等

2.1 基础数据源转换

涉及数组、字典、RDD、DataFrame 之间的互相转换

2.1.1 数组转rdd
def show(x):
    print(x)
lists = [['a', 1], ['b', 2]]
sc = spark_session.sparkContext
rdd = sc.parallelize(lists)
rdd.foreach(show)
2.1.2 数组转DataFrame
# 数组转dataframe
lists = [['a', 1], ['b', 2]]
dataframe = spark_session.createDataFrame(lists)
dataframe.show()
--->>>
+---+---+
| _1| _2|
+---+---+
|  a|  1|
|  b|  2|
+---+---+
2.1.3 字典转DataFrame
dicts = [{'col1':'a', 'col2':1}, {'col1':'b', 'col2':2}]
dataframe = spark_session.createDataFrame(dicts)
dataframe.show()
2.1.4 Rdd转DataFrame
from pyspark.sql.types import DoubleType, StringType, IntegerType, FloatType
from pyspark.sql.types import StructField
from pyspark.sql.types import StructType

PYSPARK_SQL_TYPE_DICT = {
    int: IntegerType(),
    float: FloatType(),
    str: StringType()
}

# 生成RDD
rdd = spark_session.sparkContext.parallelize([['a', 1], ['b', 2]])
# 定义字段信息
field_info_list = [["a", str], ["b", int]]
# 生成 schema
struct_type_list = []
for field_info in field_info_list:
    field = field_info[0]
    data_type = PYSPARK_SQL_TYPE_DICT[field_info[1]]
    struct_field = StructField(field, data_type)
    struct_type_list.append(struct_field)
schema = StructType(struct_type_list)
print(schema)
# 转换为dataframe
dataframe = spark_session.createDataFrame(rdd, schema=schema)
dataframe.show()
2.1.5 DataFrame转RDD:
numeric_columns = ["column_A"]  # 需要转成 rdd 的列
rdd = dataframe.select(numeric_columns).rdd
# 使用 index 或者列名取数都可以
rdd_2 = rdd.map(lambda row: (row[0], row["column_A"]))
2.1.6 RDD转数组
# collect 会把 rdd 收集为本地的数组(元素为元组/列表)
data_list = rdd.collect()
# 也可以直接解包: for value1, value2 in data_list:
for line in data_list:
    print(line[0], line[1])
2.1.7 DataFrame转pandas
dataframe.toPandas()
2.1.8 pandas转DataFrame

建议先处理完空值,再强转数据类型,最后转 DataFrame,否则可能会报错

# 处理空值
fillna_value = {
    "order_count": 0,
    "complaint_count": 0,
    "complaint_ratio": 0.0,
    "hex_type": 0
}
print("空值处理")
pandas_df = pandas_df.fillna(fillna_value)
print(pandas_df.head())

# 强转类型,建议每一个都要转,object必须转str
print("数据类型处理")
astype_value = {
    "hex_id": str,
    "time_id": int,
    "city_id": int,
    "order_count": int,
    "complaint_count": int,
    "danger_score": int,
    "hex_type": int,
    "complaint_ratio": float
}

# 数据类型处理
for columns, dtype in astype_value.items():
    pandas_df[columns] = pandas_df[columns].astype(dtype)

dataframe = hive_ctx.createDataFrame(pandas_df)

2.2 本地文件数据转换

涉及到文本,csv,json等转换

2.2.1 文本转RDD
 sc.textFile("file:///home/xuqm/ML_Data/input/synthetic_control.data").map(_.split("\\s+"))
2.2.2 csv转dataframe

API :http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader

# csv 转 dataframe
data_path = "/Users/mtdp/work/code/machinelearning/data/tabular/iris/iris.csv"
df = spark_session.read.csv(data_path, header=True)
df.show(5)
2.2.3 dataframe 转csv
# 只有一个分区,最终只会写一个文件
dataFrame = dataFrame.coalesce(numPartitions=1)
dataFrame.write.csv(dataframe_path, header=True)
# dataframe.coalesce(1).write.mode('overwrite').option("delimiter", "\t").option("header", "true").csv(output_path)

2.3 外部数据源转换

2.3.1 hive转dataframe
# 方式一
# 先创建HiveContext
hive_ctx.sql("CREATE TEMPORARY FUNCTION projHex AS 'com.sankuai.meituan.hive.udf.proj2Hex'")
sql_str = "select * from test_table"
data = hive_ctx.sql(sql_str)

# 方式二
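#   (示意,假设 SparkSession 已开启 Hive 支持)
# spark_session = SparkSession.builder.appName(app_name).enableHiveSupport().getOrCreate()
# data = spark_session.sql(sql_str)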
2.3.2 dataframe转hive
# dataframe 为待写入hive的 DataFrame
table_name = ""
dt = ""
is_overwrite = True
# 注册临时表
temp_table_name = "tempTable_{}".format(table_name.replace(".", ""))
dataframe.registerTempTable(temp_table_name)

columns = ",".join([column for column in dataframe.columns if column != "dt"])
insert_model = "into"
if is_overwrite:
	insert_model = "overwrite"

# 组装插入语句
insert_sql_str = """
    insert {insert_model} table {table_name} partition (dt = \"{dt}\")
    select {columns} from {temp_table_name}
""".format(table_name=table_name, insert_model=insert_model,columns=columns, dt=dt, temp_table_name=temp_table_name)

# 写入hive
hive_ctx.sql(insert_sql_str)
2.3.3 Mysql转DataFrame
url = "jdbc:mysql://{host}:{port}/{database}".format(host=host, port=port, database=database)

dbtable = "(SELECT * FROM {table_name}) tmp".format(table_name=table_name)
# 注意: read 在 SparkSession / SQLContext 上,不在 SparkContext 上
dataframe = spark_session.read.format("jdbc").options(
                        url=url,
                        driver="com.mysql.jdbc.Driver",
                        dbtable=dbtable,
                        user=user,
                        password=password
                        ).load()
2.3.4 DataFrame转Mysql

注意:运行时必须要加mysql-connector-java-5.1.46.jar包

# bin/spark-submit --jars mysql-connector-java-5.1.46.jar spark_utils.py
url = "jdbc:mysql://{host}:{port}/{database}?user={user}&password={" \
						"password}&useUnicode=true&characterEncoding=GBK" \
						.format(host=host, port=port, database=database, user=user, password=password)
properties = {"driver": 'com.mysql.jdbc.Driver'}
dataFrame.write.jdbc(url=url,
                    mode=model,
                    table=table,
                    properties=properties)
2.3.5 HBase转RDD
2.3.6 RDD转HBase
2.3.7 HBase转DataFrame
2.3.8 DataFrame转HBase

三、RDD操作

3.1 转换(Transformation)

常见的转换算子有 map、filter、flatMap、distinct、reduceByKey 等。转换算子是惰性执行的,只有遇到行动算子时才会真正触发计算。
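
下面是一个简单的转换算子示意(假设 sc 为已创建的 SparkContext):

# 转换算子只是记录计算逻辑,不会立即执行
rdd = sc.parallelize([1, 2, 3, 4, 5])
rdd_map = rdd.map(lambda x: x * 2)                                         # map: 逐元素变换
rdd_filter = rdd_map.filter(lambda x: x > 4)                               # filter: 过滤
rdd_flat = sc.parallelize(["a b", "c d"]).flatMap(lambda s: s.split(" "))  # flatMap: 一对多展开
print(rdd_filter.collect())   # 行动算子 collect 触发计算: [6, 8, 10]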

3.2 行动(Action)
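
常见的行动算子有 collect、count、take、reduce、foreach 等,行动算子会触发实际计算并返回结果或产生输出。下面是一个简单示意(假设 sc 为已创建的 SparkContext):

# 行动算子: 触发计算,把结果返回到 driver 或产生输出
rdd = sc.parallelize([1, 2, 3, 4, 5])
print(rdd.count())                       # 5
print(rdd.take(2))                       # [1, 2]
print(rdd.reduce(lambda a, b: a + b))    # 15
print(rdd.collect())                     # [1, 2, 3, 4, 5]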

3.3 控制类算子
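
控制类算子一般指缓存/持久化相关的算子,如 cache、persist、unpersist 等,用于把需要复用的中间结果缓存起来,避免重复计算。下面是一个简单示意(假设 sc 为已创建的 SparkContext):

from pyspark import StorageLevel

rdd = sc.parallelize(range(100)).map(lambda x: x * x)
# cache() 等价于 persist(StorageLevel.MEMORY_ONLY),这里显式指定存储级别
rdd.persist(StorageLevel.MEMORY_AND_DISK)
print(rdd.count())    # 第一次行动算子触发计算并缓存
print(rdd.sum())      # 之后的行动算子复用缓存结果
rdd.unpersist()       # 使用完后释放缓存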

四、DataFrame操作

Spark 提供了dataframe对数据可以进行丰富的数据处理。

Spark SQL和DataFrames的重要类:

  • pyspark.sql.SparkSession: DataFrame 和 SQL 功能的主要入口点。

  • pyspark.sql.DataFrame: 按命名列组织的分布式数据集合。

  • pyspark.sql.Column: DataFrame 中的列表达式。

  • pyspark.sql.Row: DataFrame 中的一行数据。

  • pyspark.sql.GroupedData: 由 DataFrame.groupBy() 返回,提供聚合方法。

  • pyspark.sql.DataFrameNaFunctions: 处理缺失数据(空值)的方法。

  • pyspark.sql.DataFrameStatFunctions: 统计相关的方法。

  • pyspark.sql.functions: 可用于 DataFrame 的内置函数列表。

  • pyspark.sql.types: 可用的数据类型列表。

  • pyspark.sql.Window: 用于窗口函数。

主要还是要自己去熟悉和使用API:

pyspark.sql API 文档

https://www.cnblogs.com/kaiwen03/p/9687015.html

4.1 基本操作

describe:

df_clients.describe()
--- >>>
DataFrame[summary: string, client_id: string, joined: string, income: string, credit_score: string]

columns:

输出字段名,类型为list

df_clients.columns
--- >>>
['client_id', 'joined', 'income', 'credit_score']

dtypes:

输出每一列的数据类型

print(df_clients.dtypes)
--- >>>
[('client_id', 'string'), ('joined', 'string'), ('income', 'string'), ('credit_score', 'string')]

for dtype in df_clients.dtypes:
    print(dtype[0], dtype[1])
--- >>>
client_id string
joined string
income string
credit_score string

drop:

删除一列

color_df.drop('length').show()

4.2 sql 语法

select
# 方式一
df_clients.select(df_clients.client_id).show(3)

# 方式二
df_clients.select("client_id").show(3)

# 方式三
columns = ["client_id", "joined"]
df_clients.select(columns).show(3)
where
# 条件过滤
where_cmd = "joined > \"2011-10-14\" and credit_score > 560"
df_clients.where(where_cmd).show(5)

# 判断是否为空
from pyspark.sql.functions import isnan, isnull
# isnull 把列里面数据为null的筛选出来(代表python的None类型)
# isnan 把列里面数据为nan的筛选出来(Not a Number,非数字数据)

# 方法一
df_clients.filter(isnull("joined")).show(5)

# 方法二
df_clients.where(isnull("joined")).show(5)
distinct / dropDuplicates
# 删除重复值行
df1.dropDuplicates().show()

# 只要某一列有重复值,则去重
df1.dropDuplicates(subset=['FirstName']).show()
列处理
pyspark.sql.functions.abs(col)              # 计算绝对值
pyspark.sql.functions.avg(col)              # 聚合函数: 返回组中值的平均值
pyspark.sql.functions.variance(col)         # 返回组中值的总体方差
pyspark.sql.functions.ceil(col)             # 向上取整
pyspark.sql.functions.floor(col)            # 向下取整
pyspark.sql.functions.collect_list(col)     # 聚合为列表(保留重复值)
pyspark.sql.functions.collect_set(col)      # 聚合为集合(去除重复值)
pyspark.sql.functions.count(col)            # 返回组中的项数量
pyspark.sql.functions.countDistinct(col, *cols)  # 返回一列或多列的去重计数
pyspark.sql.functions.initcap(col)          # 将每个单词的首字母转为大写
pyspark.sql.functions.isnan(col)            # 如果列值是NaN,则返回true
pyspark.sql.functions.lit(col)              # 创建一个字面量(常数)列
pyspark.sql.functions.lower(col)            # 将字符串列转换为小写
pyspark.sql.functions.reverse(col)          # 反转字符串列并作为新的字符串列返回
pyspark.sql.functions.sort_array(col, asc=True)  # 对数组列中的元素按升序排序
pyspark.sql.functions.split(str, pattern)   # 按指定分隔符切分字符串
pyspark.sql.functions.array_min(col)        # 返回数组列中的最小元素
pyspark.sql.functions.array_max(col)        # 返回数组列中的最大元素
pyspark.sql.functions.stddev(col)           # 返回组中表达式的无偏样本标准差
pyspark.sql.functions.sumDistinct(col)      # 返回表达式中不同值的总和
pyspark.sql.functions.trim(col)             # 去除字符串两端的空格
pyspark.sql.functions.greatest(col1, col2)  # 按行取多列中的最大值
pyspark.sql.functions.least(col1, col2)     # 按行取多列中的最小值,也可以用lit()指定常数
# 由原来的列产生
df_clients.withColumn("new_credit_score", df_clients['credit_score'] + 200).show(3)

# 由原来的多列产生
from pyspark.sql.functions import concat
df_clients.withColumn("new_credit_score", concat("joined", "income")).show(3)

# 赋值
from pyspark.sql.functions import lit
df_clients.withColumn("new_credit_score", lit(200)).show(3)

# 字符串反转
from pyspark.sql.functions import reverse
dataframe = dataframe.withColumn("cols_1", reverse(dataframe["cols_1"]))
dataframe.show()

from pyspark.sql.functions import udf
from pyspark.sql.types import *
# 数组转字符串
join_udf = udf(lambda x: ",".join(x))
dataframe = dataframe.withColumn("word_str_2", join_udf(dataframe["word_str"]))

# 字符串转数组
split_udf = udf(lambda x: x.split(","), ArrayType(StringType()))
dataframe = dataframe.withColumn("word_str_3", split_udf(dataframe["word_str_2"]))
as 重命名列
df_clients.withColumnRenamed("credit_score", "new_credit_score").show(3)
cast 改变数据类型
from pyspark.sql.types import *
"""
__all__ = [
    "DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
    "TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
    "LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
"""
df_clients.withColumn("credit_score", df_clients["credit_score"].cast(IntegerType())).show(3)


from pyspark.sql.types import StringType
print("重命名列名")

for dtype in result_df.dtypes:
   column = dtype[0]
   data_type = dtype[1]
   print(dtype[0], dtype[1])
   if "array" in str(data_type):
       result_df = result_df.withColumn(column, result_df[column].cast(StringType()))
case when
from pyspark.sql.functions import when

when_function = when(df_clients['credit_score'] > 700, 2)\
                    .when(df_clients['credit_score'] > 600, 1) \
                    .otherwise(0)
df_clients.withColumn("type", when_function).show(3)

# between(lowerBound, upperBound)
# 筛选出某个范围内的值,返回的是TRUE or FALSE
# 即 credit_score >= 600 and credit_score <= 700
df_clients.withColumn("type", df_clients['credit_score'].between(600, 700)).show(3)
group by
# df_loans.show(5) 
from pyspark.sql.functions import mean,max

# 1
df_loans.groupBy('client_id') \
    .agg(mean("loan_amount"), max("loan_amount")).show(3)
# 2
agg_map = {"rate": "mean", "loan_amount":"max"}
df_loans.groupBy('client_id') \
    .agg(agg_map).show(3)


agg_map = {
  "user_num": "sum",
  "order_submit_num": "sum",
  "order_arrive_num": "sum",
  "high_risk_poi_category_num": "sum",
  "poi_category_collect_list": "collect_list"
}
data_frame = all_data_frame.where("city_id == {}".format(city_id))

data_frame = data_frame.groupBy(["hex_id", "city_id"]) \
.agg(agg_map)
data_frame.show()
for cols, fun_name in agg_map.items():
    old_cols = "{}({})".format(fun_name,cols)
    print(old_cols)
    new_cols = cols
    data_frame = data_frame.withColumnRenamed(old_cols, new_cols)
order by
# 默认是升序
df_clients.orderBy(["client_id", "credit_score"]).show(3)

# 降序
from pyspark.sql.functions import desc
df_clients.orderBy(["client_id", desc("credit_score")]).show(3)
join
# join 类型: 'inner', 'cross', 'full_outer', 'left_outer', 'right_outer' 等
# 默认为inner
# inner 
# 方法1 
cond = [df_clients['client_id'] == df_loans['client_id']]
df_clients.join(df_loans, cond)\
    .drop(df_loans['client_id'])\
    .show(3)

# 方法2 
cond = ['client_id']
print(df_loans.count()) 
df_clients.join(df_loans, cond).show(3)
print(df_clients.join(df_loans, cond).count()) 

# left_outer 
# 方法1 
cond = [df_clients['client_id'] == df_loans['client_id']]
hw = 'left_outer'
df_clients.join(df_loans, cond, hw)\
    .drop(df_loans['client_id'])\
    .show(3)

# 方法2 
cond = ['client_id']
hw = 'left_outer'
df_clients.join(df_loans, cond, hw).show(3)
union
df_a = df_clients
df_b = df_clients

# union 与 unionAll 行为一致,都不会自动去重,如需去重要配合 distinct()
df_a.union(df_b).show(3)
print("union count :", df_a.union(df_b).count())
print("union count :", df_a.union(df_b).distinct().count())

# 注意: unionAll 在 2.0 中已弃用,请改用 union()
df_a.unionAll(df_b).show(3)
print("unionAll count :", df_a.unionAll(df_b).count())

# 重点推荐 unionByName
# 如果列名的位置不一致,可以用unionByName,
# 如 df_a 列名顺序["joined", "income", "client_id"], df_b 列名顺序["joined", "client_id", "income"]
# 使用unionByName就可以 union
df_a = df_clients.select(["joined", "income", "client_id"])
df_b = df_clients.select(["joined", "client_id", "income"])
df_a.unionByName(df_b).show(3)
print("unionByName count :", df_a.unionByName(df_b).count())
print("unionByName count :", df_a.unionByName(df_b).distinct().count())

4.3 复杂转换

UDF

使用udf注册udf方法

from pyspark.sql.types import StringType
from pyspark.sql.types import DoubleType
from pyspark.sql.functions import udf

def concat(col1, col2):
    try:
        if col2 != 0:
            return str(col1 / col2)
        else:
            return "not"
    except:
        return "not"
func = udf(concat, StringType())

dataframe2 = dataframe.withColumn("concat_str",func(dataframe["col1"], dataframe["col2"]))
dataframe2.show()
窗口函数
from pyspark.sql.functions import row_number, desc
from pyspark.sql import Window

# 实践案例 一
# row_number() over(order by high_risk_poi_category_num ) as rank
data_frame = data_frame.withColumn("rank", row_number().over(Window.orderBy(desc("high_risk_poi_category_num"))))

# 实践案例 二
# row_number() over(partition by orderid order by high_risk_poi_category_num ) as rank
from pyspark.sql.functions import row_number, desc
from pyspark.sql import Window
window_cmd = Window.partitionBy(["orderid"]) \
                    .orderBy(desc("high_risk_poi_category_num"))
data_frame = data_frame.withColumn("rank", row_number().over(window_cmd))




# 以"loan_type", "repaid" 进行分组排序
# .partitionBy(["loan_type", "repaid"]) \
window_cmd = Window.partitionBy(["client_id","loan_type"]) \
                    .orderBy(desc("rate"))

# df_a = df_loans.select("client_id", "loan_type", "rate", row_number().over(window_cmd).alias("rank"))
df_a = df_loans.withColumn("rank", row_number().over(window_cmd))
df_a.orderBy(desc("client_id"), desc("loan_type"), desc("repaid")).show(10)
多行转一行

对应 Hive SQL 写法: concat_ws(',', collect_set(col)),PySpark 中用 collect_set / collect_list 即可实现

from pyspark.sql.functions import collect_set

# 统计 client_id 所有的借贷类型
df_loans.groupBy("client_id") \
           .agg(collect_set("loan_type")) \
           .show(3)
多列转一列
from pyspark.sql.functions import concat
df_clients.withColumn("new_credit_score", concat("joined", "income")).show(3)
一行转多行

参考:pyspark dataframe将一行分成多行并标记序号(index)_pyspark 切割数据并为每列添加列名-CSDN博客

from pyspark.sql.functions import split, explode, concat, concat_ws
dicts = [{'col1':'a,b,c', 'col2':1}, {'col1':'b,e,f', 'col2':2}]
dataframe = spark_session.createDataFrame(dicts)

dataframe = dataframe.withColumn("col1", explode(split("col1", "[,]")))
dataframe.show()
--- >>>
+----+----+
|col1|col2|
+----+----+
|   a|   1|
|   b|   1|
|   c|   1|
|   b|   2|
|   e|   2|
|   f|   2|
+----+----+


# 另一种写法(假设 df 有一列 value,值形如 "a:b:c"): split 后借助 rdd 转成三列
df_split = df.select(split(df.value, ":")).rdd.flatMap(
              lambda x: x).toDF(schema=["col1", "col2", "col3"])
df_split.show()
一列转多列

参考:Spark DataFrame 多行转多列_spark core多行转多列-CSDN博客

# 字典转dataframe
from pyspark.sql.functions import split, explode, concat, concat_ws
dicts = [{'col1':'a,b', 'col2':1}, {'col1':'b,e', 'col2':2}]
dataframe = spark_session.createDataFrame(dicts)

split_col = split(dataframe['col1'], ',')
dataframe = dataframe.withColumn('NAME1', split_col.getItem(0))
dataframe = dataframe.withColumn('NAME2', split_col.getItem(1))
dataframe.show()
--- >>>
+----+----+-----+-----+
|col1|col2|NAME1|NAME2|
+----+----+-----+-----+
| a,b|   1|    a|    b|
| b,e|   2|    b|    e|
+----+----+-----+-----+