Reading different file formats with PySpark: sc.parallelize, reading data from HDFS, csv, txt, and json
## Create the first RDD with pyspark
from __future__ import print_function, division
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
## Start Spark
spark = SparkSession.builder.master('local').appName('test').getOrCreate()
sc = spark.sparkContext
## Create the first RDD --- sparkContext
wordsList = ['cat','elephant','rat','cat']
wordsRDD = sc.parallelize(wordsList,2) # distribute the list into an RDD with 2 partitions
print(type(wordsRDD)) # check the type
wordsRDD.count() # count the number of elements
wordsRDD.take(5) # first 5 items
wordsRDD.collect() # only suitable for small datasets
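## A minimal sketch (illustrative, not from the original): a simple word count on the
## wordsRDD created above, using the standard map / reduceByKey transformations.
## The names wordPairs and wordCounts are assumed for this example.
wordPairs = wordsRDD.map(lambda word: (word, 1))        # map each word to a (word, 1) pair
wordCounts = wordPairs.reduceByKey(lambda a, b: a + b)  # sum the counts for each key
print(wordCounts.collect())                             # e.g. [('cat', 2), ('elephant', 1), ('rat', 1)]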
## Read data from HDFS
textFromHDFS = spark.read.text('hdfs://tmp/NASA_access') # read the file as a DataFrame of lines
print(type(textFromHDFS)) # check the type
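## A quick sketch (assuming the HDFS path above is reachable): spark.read.text returns a
## DataFrame with a single string column named "value", one row per line of the file.
textFromHDFS.printSchema()            # prints the schema: value: string
textFromHDFS.show(5, truncate=False)  # preview the first 5 lines
print(textFromHDFS.count())           # total number of lines in the file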