from pyspark import SparkContext

sc = SparkContext(appName="WordCount")

# Read the input file from HDFS, count the words, and keep the 5 most frequent.
wordCount = (
    sc.textFile("hdfs://hadoop102:8020/input/words.txt")
    .flatMap(lambda line: line.split(" "))    # split each line into words
    .filter(lambda word: word != "")          # drop empty tokens
    .map(lambda word: (word, 1))              # pair each word with a count of 1
    .reduceByKey(lambda x, y: x + y)          # sum the counts per word
    .sortBy(lambda x: x[1], ascending=False)  # sort by count, descending
    .take(5)                                  # top 5 (word, count) pairs
)
print(wordCount)

sc.stop()
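For comparison, the same count can be written against the DataFrame API instead of raw RDDs. This is a minimal sketch assuming Spark 2.x or later; it reads the same HDFS path as above, and the names (spark, top5) are illustrative:

from pyspark.sql import SparkSession
from pyspark.sql.functions import explode, split, col

spark = SparkSession.builder.appName("WordCountDF").getOrCreate()

# Each input line arrives as a single "value" column; split it into one word per row.
lines = spark.read.text("hdfs://hadoop102:8020/input/words.txt")
words = lines.select(explode(split(col("value"), " ")).alias("word"))

# Drop empty tokens, count per word, and keep the 5 most frequent.
top5 = (
    words.filter(col("word") != "")
    .groupBy("word")
    .count()
    .orderBy(col("count").desc())
    .limit(5)
)
top5.show()

spark.stop()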
#!/bin/bash
source /etc/profile
source ~/.bashrc

# First (and only) argument: the business date, e.g. 2023-03-28.
dt1=$1
echo "------------------- ${dt1}"
# Normalized form of the date (yyyyMMdd); kept for jobs that need it.
dt=$(date -d "$dt1" '+%Y%m%d')

# Submit the word-count job to YARN in client mode.
spark-submit \
--master yarn \
--deploy-mode client \
--num-executors 30 \
--executor-memory 4G \
--executor-cores 2 \
--driver-memory 4G \
--conf spark.default.parallelism=40 \
--conf spark.executor.memoryOverhead=2560 \
--conf spark.network.timeout=200s \
--conf spark.driver.maxResultSize=2G \
--conf "spark.executor.extraJavaOptions=-XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSCompactAtFullCollection -XX:+UseCompressedOops -XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -XX:+PrintGCApplicationConcurrentTime -Xloggc:gc.%p_%t.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/spark/java_pid<pid>.hprof" \
/root/WorldCountDemo.py \
> /root/logs/${dt1}.log 2>&1
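Assuming the script above is saved as submit_wordcount.sh (a hypothetical name), it takes the date as its only argument:

bash submit_wordcount.sh 2023-03-28

The final line redirects all driver output, stdout and stderr alike, to /root/logs/2023-03-28.log for that run.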
This is how a PySpark job is submitted to run on the cluster.