# encoding=utf-8
# Read a data file that was (or would be) distributed to executors with
# SparkContext.addFile(); SparkFiles.get() resolves its local path.
from pyspark import SparkContext
from pyspark import SparkFiles

sc = SparkContext.getOrCreate()
# sc.addFile('hdfs://192.168.56.122:9000/data')  # distribute the file first

# NOTE(review): 'e:/data.txt' looks like a Windows path — SparkFiles.get()
# expects the *filename* passed to addFile(); confirm against the addFile call.
with open(SparkFiles.get('e:/data.txt')) as files:
    ctx = files.readlines()

for x in ctx:
    print(x)
# 加载已存在的文件并调用方法 (Load an existing file and call a function from it)
# Save this function in a file named myPyspark_ReadFile2.py so the driver
# below can ship it with sc.addFile() and import it on the executors.
# encoding=utf-8
def mynum(p):
    """Return the fixed greeting 'hello man'; the argument *p* is ignored."""
    return 'hello man'
# Driver: distribute myPyspark_ReadFile2.py to the executors, then import
# and invoke the function it defines.
from pyspark import SparkContext
from pyspark import SparkFiles

sc = SparkContext.getOrCreate()
sc.addFile("myPyspark_ReadFile2.py")

# The import must come after addFile() so the module is available locally.
from myPyspark_ReadFile2 import mynum

greeting = mynum('dd')
print(greeting)
# 读取csv文件修改列名 (Read a CSV file and rename its columns)
# encoding = utf-8
# Read a headerless CSV (columns default to _c0.._c3) and rename the columns.
from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

# 更改列名 — rename columns.
# Original chained one withColumn per column (with a duplicated
# withColumn('peoplenum', ...)) and then select()-ed the renames; a single
# select with alias() does the same thing without the duplication.
df = (
    spark.read.format('csv')
    .load('hdfs://192.168.56.122:9000/ord/orders.csv')
    .select(
        col('_c0').alias('id'),
        col('_c1').alias('regdate'),
        col('_c2').alias('peoplenum'),
        col('_c3').alias('status'),
    )
)
df.show(3)