Installing Spark // os.path.abspath and os.path.join in Python, and the normal-distribution PPF

Commands:
vim ~/.bashrc
source ~/.bashrc
ps aux | grep spark
pkill -f "spark"
sudo chown -R sc:sc  spark-2.3.1-bin-hadoop2.7/
sudo mv /home/sc/Downloads/spark-2.3.1-bin-hadoop2.7 /opt/
echo $SPARK_HOME    # check the Spark installation path
http://mirrors.tuna.tsinghua.edu.cn/apache/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz

None""在spark sql里都是为null 
Commonly used functions in Python's functools module
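Since functools comes up again later in these notes (partial is used in the deploy script at the bottom), a minimal sketch of two commonly used helpers, partial and reduce:

import functools

def power(base, exp):
    return base ** exp

square = functools.partial(power, exp=2)   # freeze the exp argument
print(square(5))                           # 25

total = functools.reduce(lambda a, b: a + b, [1, 2, 3, 4])  # fold a sequence with a binary function
print(total)                               # 10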
/home/sc/PycharmProjects/sc/model-feature-engine/biz/sub/dag.py

################
import os

a = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
b = os.pardir
c = os.path.join(__file__)
d1 = os.path.join(__file__, os.pardir)
d2 = os.path.join(__file__, os.pardir, os.pardir)
e1 = os.path.abspath(os.path.join(__file__))
e2 = os.path.abspath(os.path.join(__file__, os.pardir))
print(a)
print(b)
print(c)
print(d1)
print(d2)
print(e1)
print(e2)

Output:
/home/sc/PycharmProjects/sc
..
/home/sc/PycharmProjects/sc/model-feature-engine/temp4.py
/home/sc/PycharmProjects/sc/model-feature-engine/temp4.py/..
/home/sc/PycharmProjects/sc/model-feature-engine/temp4.py/../..
/home/sc/PycharmProjects/sc/model-feature-engine/temp4.py
/home/sc/PycharmProjects/sc/model-feature-engine
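Takeaway: os.path.join only concatenates the segments as strings (it happily appends ".." to a file name), and os.path.abspath is what collapses the ".." parts. The same two results can also be obtained with os.path.dirname; a minimal sketch, assuming the same temp4.py location:

import os

# same as e2 = os.path.abspath(os.path.join(__file__, os.pardir))
print(os.path.dirname(os.path.abspath(__file__)))
# same as a = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))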


Converting a DataFrame to an RDD
RDD.map(func)
RDD.map(lambda x : func(x))
Difference between the two calls above: for a one-argument func they are equivalent; the lambda only adds one extra layer of function call.
Note: a PySpark DataFrame has no map method of its own (convert with df.rdd first); a PipelinedRDD does have map, since it is a subclass of RDD.
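A minimal sketch of the DataFrame-to-RDD round trip (runs locally; the data here is made up):

from pyspark.sql import SparkSession, Row

spark = SparkSession.builder.master("local[1]").appName("df_to_rdd_demo").getOrCreate()
df = spark.createDataFrame([Row(name="a", cnt=1), Row(name="b", cnt=2)])
rdd = df.rdd                                        # an RDD of Row objects
mapped = rdd.map(lambda row: (row.name, row.cnt))   # transformations return a PipelinedRDD
print(type(rdd), type(mapped), mapped.collect())
spark.stop()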

Numeric and key-value-pair RDDs
An ordinary RDD can be turned into a pair RDD by calling map so that each element becomes a (key, value) tuple; see the sketch below.
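For example (a minimal sketch, assuming an existing SparkContext named sc):

words = sc.parallelize(["spark", "rdd", "spark"])
pairs = words.map(lambda w: (w, 1))                     # now a pair RDD of (key, value)
print(pairs.reduceByKey(lambda a, b: a + b).collect())  # e.g. [('spark', 2), ('rdd', 1)]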

Reading data and running queries with a SchemaRDD both return a SchemaRDD. A SchemaRDD is similar to a table in a traditional database; internally it is an RDD of Row objects together with schema information describing the type of each column.
A Row object is just a wrapper around an array of basic types (integers, strings, and so on). A SchemaRDD is still an RDD.

/home/sc/PycharmProjects/risk-model/etl_ljt_script/pysparkCsvUtils.py

self.session.sparkContext.textFile


    filename = 'judgedoc_litigant.csv'
    csvspath = readpath + filename
    sqlContext = SQLContext(sparkContext=sc)
    df = spark.sparkContext.textFile(csvspath)
    print(type(df))
    # df = sqlContext.read.text(csvspath)
    # df.show()
    # the table is comma-delimited
    # dfrdd = df.rdd.map(lambda le: le.split(","))
    dfrdd = df.map(mapper)
    # Infer the schema, and register the DataFrame as a table.
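The mapper used here is not shown in these notes; a hedged sketch of what it and the follow-up steps might look like (the column names are hypothetical):

from pyspark.sql import Row

def mapper(line):
    # hypothetical: split one comma-delimited line into named fields
    fields = line.split(",")
    return Row(company_name=fields[0], case_id=fields[1])

# dfrdd = df.map(mapper)
# judgedoc_df = sqlContext.createDataFrame(dfrdd)           # infer the schema from the Rows
# judgedoc_df.createOrReplaceTempView("judgedoc_litigant")  # register as a table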


When is the result a pyspark.rdd.RDD (as opposed to a PipelinedRDD)?


    sqlContext = SQLContext(sparkContext=sc)
    df = spark.sparkContext.textFile(hdfspath)  # <class 'pyspark.rdd.RDD'>
    print(type(df))

    dfrdd = df.map(mapper)
    print(type(dfrdd))   # <class 'pyspark.rdd.PipelinedRDD'>
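In short: sparkContext.textFile (and parallelize) return a plain pyspark.rdd.RDD, while Python-side transformations such as map, filter and flatMap return a pyspark.rdd.PipelinedRDD, which is a subclass of RDD and therefore still supports map and the other RDD methods. A quick check on the dfrdd above:

from pyspark.rdd import RDD, PipelinedRDD

print(isinstance(dfrdd, PipelinedRDD))  # True
print(isinstance(dfrdd, RDD))           # True -- a PipelinedRDD is still an RDD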




from pyspark.sql import SparkSession, SQLContext
from pyspark.sql import Row
from pyspark.sql.functions import udf
from pyspark.sql.types import *

def testjudgedoc(spark, sc):
    '''3) Judgment documents (judgedoc_litigant.csv):'''
    filename = 'judgedoc_litigant.csv'
    csvspath = readpath + filename
    sqlContext = SQLContext(sparkContext=sc)
    df = spark.sparkContext.textFile(csvspath)

if __name__ == '__main__':
    spark = SparkSession.builder.master(sparkpath) \
        .appName("SC_ETL_ljt_spark").getOrCreate()
    sc = spark.sparkContext
    sc.addPyFile('pysparkCsvUtils.py')
    sc.addPyFile('caseReasonCode.py')
    sc.addPyFile('case_reason_reflection.py')
    sc.addPyFile('case_reason_map.py')
    # sc.addPyFile('parser.py')
    # sc.addPyFile('judgedoc.model.bin')
    sc.addPyFile('parse_util.py')
    sc.addPyFile('models.py')
    sc.addPyFile('parse_money.py')
    sc.addPyFile('map_litigant_type.py')
    # basic_datasource_example(spark)
    # testzhixing(spark,sc)
    # testRead(spark, sc, filename='company_patent.csv')
    # testjudgedoc(spark, sc)
    testcourtannouncement(spark, sc)
    # testcourtnotice(spark, sc)
    # testshixin(spark, sc)
    # testpublish(spark, sc)
    spark.stop()


import math

# wrapped in a function here (the name prob_to_score is chosen for this note) so the fragment runs
def prob_to_score(revoke_prob):
    # map a revoke probability to a score via scaled log-odds, then rescale to roughly 0-100
    if revoke_prob == 1.0:
        score = 67
    elif revoke_prob == 0.0:
        score = -133
    else:
        score = (-6.78 + 14.13 * math.log(float(revoke_prob) / float(1.0 - revoke_prob)))
    score = float((score + 133)) / 2
    return round(score, 2)
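A quick sanity check of this mapping (prob_to_score is the wrapper name introduced above):

print(prob_to_score(0.5))   # log-odds 0 -> raw -6.78 -> (-6.78 + 133) / 2 = 63.11
print(prob_to_score(0.9))   # log-odds ln(9) ~ 2.197 -> about 78.63
print(prob_to_score(1.0))   # clamped: (67 + 133) / 2 = 100.0
print(prob_to_score(0.0))   # clamped: (-133 + 133) / 2 = 0.0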

In[12]: k1 = -2.78+14.13*math.log(100)
In[13]: k1
Out[13]: 62.291054728011744
In[14]: k1 = -2.78+14.13*math.log(10000)
In[15]: k1
Out[15]: 127.36210945602349
In[16]: k1 = -2.78+14.13*math.log(10000)
In[17]: import scipy.stats as st
In[18]: hyRankXPosition = st.norm.ppf(1, 0.3, 0.1)
In[19]: hyRankXPosition
Out[19]: inf
In[20]: st.norm.ppf(0.5, 0.3, 0.1)
Out[20]: 0.3
In[21]: st.norm.ppf(0.1, 0.3, 0.1)
Out[21]: 0.17184484344553994
In[22]: st.norm.ppf(0.8, 0.3, 0.1)
Out[22]: 0.38416212335729144
In[23]: st.norm.ppf(0.9, 0.3, 0.1)
Out[23]: 0.42815515655446
In[24]: st.norm.ppf(0.9999, 0.3, 0.1)
Out[24]: 0.6719016485455709
In[25]: st.norm.ppf(0.999999999, 0.3, 0.1)
Out[25]: 0.8997807019601636
In[26]: st.norm.ppf(0.9999999999999, 0.3, 0.1)
Out[26]: 1.0348754540300042
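st.norm.ppf is the percent point function, i.e. the inverse of the normal CDF: given a probability q and (loc, scale) = (mean, standard deviation), it returns the x whose CDF equals q. That is why ppf(1, ...) is inf and ppf(0.5, 0.3, 0.1) is the mean 0.3. A quick round-trip check:

import scipy.stats as st

x = st.norm.ppf(0.8, loc=0.3, scale=0.1)   # about 0.3842
print(st.norm.cdf(x, loc=0.3, scale=0.1))  # back to 0.8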


WORK_DIR = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir))
_, WORK_PROJ = os.path.split(WORK_DIR)
WORK_ZIP = os.path.join(WORK_DIR, "%s.zip" % WORK_PROJ)
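For example, if this conf.py lives at /home/sc/PycharmProjects/sc/model-feature-engine/conf/conf.py (a hypothetical location, consistent with the project paths in these notes), the three values come out roughly as:

# WORK_DIR  -> /home/sc/PycharmProjects/sc/model-feature-engine
# WORK_PROJ -> model-feature-engine
# WORK_ZIP  -> /home/sc/PycharmProjects/sc/model-feature-engine/model-feature-engine.zip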
/home/sc/PycharmProjects/sc/model-feature-engine/dag.gv


/home/sc/PycharmProjects/sc/model-feature-engine/biz/spark_session_utils/spark_session_utils.py


 HDFS_RISK_MODEL_NOT_DAILY_MID_SAIC +"/share_as_fr.csv"



    def bfs(self):
        source_set = set([link["source"] for link in self.Links])
        target_set = set([link["target"] for link in self.Links])
        root = source_set - target_set

        # also add tables that do not need a scheduled run and that already exist
        not_need_to_run = set([link["target"] for link in self.Links if not link["need_to_run"]])
        not_need_to_run_and_exist_set = set([node["id"] for node in self.Nodes if node.get("status") and node["id"] in not_need_to_run])
        root = root.union(not_need_to_run_and_exist_set)
        step_limit = 10000
        pre_set = root

        for i in range(step_limit+1):
            # find the next tables that can be computed from the tables currently available
            links_as_pre_source = [link for link in self.Links if link["source"] in pre_set and not link.get("step")]
            tmp_target_to_add_pre = set()
            for link_as_pre_source in links_as_pre_source:
                tmp_source_set = set([link["source"] for link in self.Links if link["target"] == link_as_pre_source["target"]])
                # if every table this target depends on already exists, the link can be run
                if len(tmp_source_set - pre_set) == 0:
                    link_as_pre_source["step"] = i
                    tmp_target_to_add_pre.add(link_as_pre_source["target"])

            # only merge into pre_set after the whole current step has been evaluated
            pre_set = pre_set.union(tmp_target_to_add_pre)

        to_left_set = target_set - pre_set
        to_left_link = [link for link in self.Links if link["target"] in to_left_set]

        to_run_links = [link for link in self.Links if link["need_to_run"]]
        to_run_links = sorted(to_run_links, key=lambda _: _.get("step"), reverse=False)
        return to_left_link, to_run_links
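A hedged sketch of the shape of self.Nodes and self.Links that this BFS assumes (the field names come from the code above; the concrete ids and class name are hypothetical):

# i.e. what self.Nodes / self.Links might contain:
nodes = [
    {"id": "raw_company", "status": True},    # status: the table already exists on HDFS
    {"id": "share_as_fr", "status": False},
]
links = [
    # one edge per (dependency table -> result table) pair of a task class
    {"source": "raw_company", "target": "share_as_fr",
     "cls_name": "biz.feature.network.SomeFeatureTask", "need_to_run": True},
]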



Improved version (the dependency check is grouped by cls_name, and a deduplicated task list, to_run_links_dif, is also returned):
    def bfs(self):
        source_set = set([link["source"] for link in self.Links])
        target_set = set([link["target"] for link in self.Links])
        root = source_set - target_set

        # also add tables that do not need a scheduled run and that already exist
        # not_need_to_run_target = set([link["target"] for link in self.Links if not link["need_to_run"]])
        # not_need_to_run_source = set([link["source"] for link in self.Links if not link["need_to_run"]])
        # not_need_to_run_and_exist_set = set([node["id"] for node in self.Nodes if node.get("status") and node["id"] in not_need_to_run_target])
        # root = root.union(not_need_to_run_and_exist_set)
        # root = root.union(not_need_to_run_source).union(not_need_to_run_source)

        step_limit = 10000
        pre_set = root

        for i in range(1, step_limit+1):
            # find the next tables that can be computed from the tables currently available
            links_as_pre_source = [link for link in self.Links if link["source"] in pre_set and not link.get("step")]
            tmp_target_to_add_pre = set()
            for link_as_pre_source in links_as_pre_source:
                tmp_source_set = set([link["source"] for link in self.Links if link["cls_name"] == link_as_pre_source["cls_name"]])
                # if every dependency table of this task (grouped by cls_name) already exists, the link can be run
                if len(tmp_source_set - pre_set) == 0:
                    link_as_pre_source["step"] = i
                    tmp_target_to_add_pre.add(link_as_pre_source["target"])

            # only merge into pre_set after the whole current step has been evaluated
            pre_set = pre_set.union(tmp_target_to_add_pre)

        to_left_set = target_set - pre_set
        to_left_link = [link for link in self.Links if link["target"] in to_left_set]

        to_run_links = [link for link in self.Links if link["need_to_run"]]
        to_run_links = sorted(to_run_links, key=lambda _: _.get("step"), reverse=False)
        to_run_links_dif = []
        to_run_cls_name_set = set()

        for a_t in to_run_links:
            if a_t["cls_name"] not in to_run_cls_name_set:
                to_run_links_dif.append(a_t)
                to_run_cls_name_set.add(a_t["cls_name"])

        return to_left_link, to_run_links, to_run_links_dif



#!/usr/bin/env python
# encoding: utf-8


from conf.conf import SPARK_MASTER_URL, SPARK_TASK_NAME, WORK_ZIP
from pyspark.sql import SparkSession
from biz.sub.dag import SparkTask
import abc


class SparkSessionUtils(SparkTask):

    session = SparkSession.builder \
        .master(SPARK_MASTER_URL) \
        .appName(SPARK_TASK_NAME) \
        .getOrCreate()

    session.conf.set("spark.driver.maxResultSize", "4g")
    session.conf.set("spark.sql.broadcastTimeout", 1200)
    session.conf.set("spark.sql.crossJoin.enabled", "true")
    # session.sparkContext.addPyFile(WORK_ZIP)

    # def add_zip_py(self):
    #     self.session.sparkContext.addPyFile(WORK_ZIP)

    @abc.abstractmethod
    def run_task(self):
        raise NotImplementedError

    def _run_task(self):
        # self.add_zip_py()
        self.run_task()
        self.session.stop()

Now (the SparkSession is built lazily in __build_session, called from _run_task, instead of at class-definition time):
from conf.conf import SPARK_MASTER_URL, SPARK_TASK_NAME, WORK_ZIP
from pyspark.sql import SparkSession
from biz.sub.dag import SparkTask
import abc


class SparkSessionUtils(SparkTask):
    session = None

    def __build_session(self):
        session = SparkSession.builder \
            .master(SPARK_MASTER_URL) \
            .appName(SPARK_TASK_NAME) \
            .getOrCreate()

        session.conf.set("spark.driver.maxResultSize", "4g")
        session.conf.set("spark.sql.broadcastTimeout", 1200)
        session.conf.set("spark.sql.crossJoin.enabled", "true")
        self.session = session
        return self.session
        # session.sparkContext.addPyFile(WORK_ZIP)

    # def add_zip_py(self):
    #     self.session.sparkContext.addPyFile(WORK_ZIP)

    @abc.abstractmethod
    def run_task(self):
        raise NotImplementedError

    def _run_task(self):
        self.__build_session()
        # self.add_zip_py()
        self.run_task()
        self.session.stop()
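A minimal sketch of how a concrete task could subclass this (the class name, table names and CSV path are hypothetical; only run_task and get_spark_task follow the contract used by the Controller below):

class ShareAsFrTask(SparkSessionUtils):

    def get_spark_task(self):
        # declares the DAG dependencies consumed by the Controller
        return {"depend_tables": ["raw_company"], "result_tables": ["share_as_fr"]}

    def run_task(self):
        # assumes _run_task has built self.session before calling run_task
        df = self.session.read.csv("/risk_model/mid_saic/share_as_fr.csv", header=True)
        df.createOrReplaceTempView("share_as_fr")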



#!/usr/bin/env python
# encoding: utf-8

from conf.all_task_conf import ALL_SPARK_CLASS_TASK
from conf.conf import HDFS_RISK_MODEL_AUTO_RAW
from controller.oslo_utils.importutils import import_class
from biz.sub.dag import TaskDag
from fabric_utils.fabric_utils import FabricHdfsUtils, FabricDbUtils
from scpy.logger import get_logger
from biz.load_raw_data.sub.load_data_to_hdfs import LoadRawData
import json

logger = get_logger(__file__)


class Controller(object):
    """
    控制层
    负责 查看 执行spark task class 里面的那些表存在那些表不存在
    生成计算图,调度计算过程
    """
    def __init__(self):
        self.task_dag = TaskDag()
        self.cls_map = {}
        self._task_run_serial = []
        self.fabric_hdfs_utils = FabricHdfsUtils()
        for cls_dict in ALL_SPARK_CLASS_TASK:
            cls_str = cls_dict.get("cls_name")
            this_cls = import_class(cls_str)
            self.cls_map[cls_str] = this_cls
            a_node_dag = getattr(this_cls(), "get_spark_task")()
            depend_tables = a_node_dag["depend_tables"]
            result_tables = a_node_dag["result_tables"]
            # build the DAG: add the nodes
            self.task_dag.add_nodes(depend_tables+result_tables)
            # build the DAG: add the edges
            self.task_dag.add_dag(cls_dict, depend_tables, result_tables)

    def plot(self):
        self.analyse()
        self.task_dag.plot(view=True)

    def analyse(self):
        # check which tables already exist and which do not
        self.task_dag.set_table_info(self.fabric_hdfs_utils.hdfs_exits)
        # run the BFS over the DAG
        to_left_link, self._task_run_serial = self.task_dag.bfs()
        to_left_tables = [_["target"] for _ in to_left_link]
        logger.info("to_left_tables:\n" + json.dumps(to_left_tables, ensure_ascii=False))

    def run_all(self):
        self.analyse()

        for task_dict in self._task_run_serial:
            cls_name = task_dict.get("cls_name")
            if task_dict.get("need_to_run"):
                task = self.cls_map[cls_name]
                logger.info("task class %s starts" % cls_name)
                getattr(task(), "run_task")()
                logger.info("task class %s done" % cls_name)

    def run_single(self, cls_name):
        # self.analyse()
        task = self.cls_map[cls_name]
        getattr(task(), "run_task")()

    def load_not_exit(self):
        pass

    @staticmethod
    def reload_all_daily_hdfs():
        fabric_hdfs_utils = FabricHdfsUtils()
        if fabric_hdfs_utils.hdfs_exits(HDFS_RISK_MODEL_AUTO_RAW):
            fabric_hdfs_utils.hdfs_rmr(HDFS_RISK_MODEL_AUTO_RAW)
        fabric_hdfs_utils.hdfs_mkdir(HDFS_RISK_MODEL_AUTO_RAW)
        LoadRawData().put_all_daily()

    @staticmethod
    def export_raw_data():
        FabricDbUtils().export_all_raw_data_by_sh()

    def collect(self):
        """
        Collect.
        """
        pass

    def save_all(self):
        """
        Save all data.
        :return:
        """
        pass


Now (bfs also returns a deduplicated task list, which analyse logs and run_all iterates):
from conf.all_task_conf import ALL_SPARK_CLASS_TASK
from conf.conf import HDFS_RISK_MODEL_AUTO_RAW
from controller.oslo_utils.importutils import import_class
from biz.sub.dag import TaskDag
from fabric_utils.fabric_utils import FabricHdfsUtils, FabricDbUtils
from scpy.logger import get_logger
from biz.load_raw_data.sub.load_data_to_hdfs import LoadRawData
import json

logger = get_logger(__file__)


class Controller(object):
    """
    控制层
    负责 查看 执行spark task class 里面的那些表存在那些表不存在
    生成计算图,调度计算过程
    """
    def __init__(self):
        self.task_dag = TaskDag()
        self.cls_map = {}
        self._task_run_serial = []
        self._task_run_serial_edg = []
        self.fabric_hdfs_utils = FabricHdfsUtils()
        for cls_dict in ALL_SPARK_CLASS_TASK:
            cls_str = cls_dict.get("cls_name")
            this_cls = import_class(cls_str)
            self.cls_map[cls_str] = this_cls
            a_node_dag = getattr(this_cls(), "get_spark_task")()
            depend_tables = a_node_dag["depend_tables"]
            result_tables = a_node_dag["result_tables"]
            # build the DAG: add the nodes
            self.task_dag.add_nodes(depend_tables+result_tables)
            # build the DAG: add the edges
            self.task_dag.add_dag(cls_dict, depend_tables, result_tables)

    def plot(self):
        self.analyse()
        self.task_dag.plot(view=True)

    def analyse(self):
        # check which tables already exist and which do not
        self.task_dag.set_table_info(self.fabric_hdfs_utils.hdfs_exits)
        # run the BFS over the DAG
        to_left_link, self._task_run_serial_edg, self._task_run_serial = self.task_dag.bfs()

        to_left_tables = [_["target"] for _ in to_left_link]
        logger.info("to_left_tables:\n" + json.dumps(to_left_tables, ensure_ascii=False, indent=4))
        logger.info("_task_run_serial:\n" + json.dumps(self._task_run_serial, ensure_ascii=False, indent=4))

    def run_all(self):
        self.analyse()

        for task_dict in self._task_run_serial:
            cls_name = task_dict.get("cls_name")
            if task_dict.get("need_to_run"):
                task = self.cls_map[cls_name]
                logger.info("task class %s starts" % cls_name)
                getattr(task(), "run_task")()
                logger.info("task class %s done" % cls_name)

    def run_single(self, cls_name):
        # self.analyse()
        task = self.cls_map[cls_name]
        getattr(task(), "run_task")()

    def load_not_exit(self):
        pass

    @staticmethod
    def reload_all_daily_hdfs():
        fabric_hdfs_utils = FabricHdfsUtils()
        if fabric_hdfs_utils.hdfs_exits(HDFS_RISK_MODEL_AUTO_RAW):
            fabric_hdfs_utils.hdfs_rmr(HDFS_RISK_MODEL_AUTO_RAW)
        fabric_hdfs_utils.hdfs_mkdir(HDFS_RISK_MODEL_AUTO_RAW)
        LoadRawData().put_all_daily()

    @staticmethod
    def export_raw_data():
        FabricDbUtils().export_all_raw_data_by_sh()

    def collect(self):
        """
        Collect.
        """
        pass

    def save_all(self):
        """
        Save all data.
        :return:
        """
        pass

/home/sc/PycharmProjects/sc/model-feature-engine/conf/all_task_conf.py
/home/sc/PycharmProjects/sc/model-feature-engine/biz/feature/network/feature_extract_network_all_link_judgedoc_cnt.py

h(t) = h_0(t)\exp(\beta_1 x_1 + \beta_2 x_2 + \cdots + \beta_p x_p)



from fabric_utils.fabric_utils import Deploy
import argparse
from conf.conf import WORK_DIR
import os


def build_run_task_fs(model="all", cls_name="", task_name="run.py"):
    task_fs_str = """
from controller.main_controller import Controller
from functools import partial


def run_model(model="all", cls_name=None):
    if model == "all":
        run = Controller().run_all
    elif model == "single" and cls_name and isinstance(cls_name, str):
        run = partial(Controller().run_single, cls_name=cls_name)
    else:
        raise Exception()

    return run


run_model(model="%s", cls_name="%s")()
""" % (model, cls_name)

    with open(os.path.join(WORK_DIR, task_name), "w") as fp:
        fp.write(task_fs_str)


if __name__ == '__main__':
    task_name = "run.py"

    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-a', help='action: set_env or deploy_submit (selects the run mode)', default='deploy_submit', type=str, choices=["set_env", "deploy_submit"])
    parser.add_argument('-m', help='model: all or single (how to run; "all" runs every task, "single" runs one class and requires -cls)', default='all', choices=["all", "single"])
    parser.add_argument('-cls', help='class name to run in single mode (required when -m is single)', default="")
    parser.add_argument('--help', action='help')
    args = parser.parse_args()
    build_run_task_fs(args.m, args.cls, task_name)

    if args.a == "deploy_submit":
        Deploy().deploy()
        Deploy().run_submit_task(task_name)
    elif args.a == "set_env":
        Deploy().deploy()
        Deploy().setup_py_env()
    else:
        raise Exception("please run python deploy.py --help to get help")
