# Gender-count demo: load a pipe-delimited user file from HDFS, aggregate
# row counts per gender with Spark SQL, and render the result as a pie chart.
#
# NOTE(review): this script targets the pre-2.0 Spark API (SQLContext,
# registerTempTable); on Spark >= 2.0 the replacements are SparkSession and
# DataFrame.createOrReplaceTempView.
from pyspark import SparkConf, SparkContext
from pyspark.sql import Row, SQLContext
import matplotlib.pyplot as plt


def main():
    """Run the gender-count pipeline end to end and show the plot."""
    # Create the SparkContext (local mode, modest executor memory).
    conf = (SparkConf()
            .setAppName("WordCount")
            .setMaster("local")
            .set("spark.executor.memory", "512M"))
    sc = SparkContext(conf=conf)
    try:
        input_file = "hdfs://master:8020/wctest/input/wc.input"
        text_file = sc.textFile(input_file)

        # Each input line is pipe-delimited:
        # userid|age|gender|occupation|zipcode
        user_rdd = text_file.map(lambda line: line.split("|"))
        user_rows = user_rdd.map(lambda p: Row(userid=int(p[0]),
                                               age=int(p[1]),
                                               gender=p[2],
                                               occupation=p[3],
                                               zipcode=p[4]))

        sql_ctx = SQLContext(sc)
        user_df = sql_ctx.createDataFrame(user_rows)
        user_df.registerTempTable("user")
        users = sql_ctx.sql("""select gender,count(*) counts
from user GROUP BY gender""")

        # The aggregate is tiny (one row per gender), so collecting it to the
        # driver as a pandas DataFrame for plotting is safe.
        users_pandas = users.toPandas().set_index("gender")
        axes = users_pandas["counts"].plot(kind="pie", title="gender",
                                           figsize=(8, 8), startangle=90,
                                           autopct='%1.1f%%')
        axes.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)
        plt.show()
    finally:
        # Always release the SparkContext, even when the job fails.
        sc.stop()


if __name__ == "__main__":
    main()
# Source: "pyspark Spark SQL code example" (blog post; latest recommended
# article dated 2024-04-27 03:32:03).