Import the packages
from pyspark.conf import SparkConf      # cluster configuration parameters
from pyspark.sql import SQLContext      # legacy SQL entry point, superseded by SparkSession
from pyspark.sql import SparkSession    # entry point for starting Spark
import time                             # measure startup time
t1 = time.time()
conf = (
    SparkConf()
    .setExecutorEnv("PYTHONHASHSEED", "123")  # pin Python's hash seed on every executor
    .setMaster("yarn")
)
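Why pin PYTHONHASHSEED: Python 3 randomizes string hashing per interpreter process, so two executors can disagree on hash() of the same key unless the seed is fixed; the driver needs the same seed too (for example via an exported environment variable, or spark.yarn.appMasterEnv.PYTHONHASHSEED in YARN cluster mode). A minimal local check of the effect, assuming nothing beyond the standard library:

import os, subprocess, sys
env = dict(os.environ, PYTHONHASHSEED="123")
for _ in range(2):
    # each child interpreter starts with the pinned seed, so both runs print the same hash
    out = subprocess.run([sys.executable, "-c", "print(hash('spark'))"],
                         env=env, capture_output=True, text=True)
    print(out.stdout.strip())
# Remove PYTHONHASHSEED from env and the two values will usually differ.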
spark = (
    SparkSession.builder
    .config(conf=conf)
    # Hive dynamic partitioning for partitioned-table writes
    .config("hive.exec.dynamic.partition", "true")
    .config("hive.exec.dynamic.partition.mode", "nonstrict")
    # driver resources (each key is set once; when a key is repeated, the later .config call silently wins)
    .config("spark.driver.cores", 2)
    .config("spark.driver.maxResultSize", "32g")
    .config("spark.driver.memory", "32g")
    # executor resources
    .config("spark.executor.memory", "45g")
    .config("spark.executor.instances", 16)
    .config("spark.executor.cores", 8)
    .config("spark.kryoserializer.buffer.max", "128m")
    .config("spark.network.timeout", "10000000")
    # broadcast-join threshold is in bytes; size suffixes such as "m"/"g" are accepted
    .config("spark.sql.autoBroadcastJoinThreshold", "128m")
    .config("spark.sql.broadcastTimeout", "500000")
    .config("spark.sql.shuffle.partitions", "800")
    .config("spark.sql.sources.partitionOverwriteMode", "dynamic")
    # YARN application master
    .config("spark.yarn.am.memory", "16g")
    .config("spark.yarn.am.cores", 2)
    .config("spark.yarn.executor.memoryOverhead", "128g")  # renamed spark.executor.memoryOverhead in Spark 2.3+
    # NodeManager checks are cluster-side settings; they only take effect in yarn-site.xml
    .config("yarn.nodemanager.vmem-check-enabled", "false")
    .config("yarn.nodemanager.pmem-check-enabled", "false")
    # only effective when spark.dynamicAllocation.enabled is true
    .config("spark.dynamicAllocation.maxExecutors", "500")
    .appName("project")
    .enableHiveSupport()
    .getOrCreate()
)
sc = spark.sparkContext
print(f" 启动spark花费时间: {time.time()-t1:.05f}s")
Summary: the parameters most often tuned day-to-day.
Adjust the sizes below to match your data volume and cluster resources:
.config("spark.driver.maxResultSize", "64g")
.config("spark.executor.memory", "16g")