Environment:
Ubuntu 14 + Spark 2.3.3 + PyCharm + Scala 2.11
Start Spark:
/spark/sbin/start-all.sh
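Once start-all.sh finishes, the standalone master's web UI is normally reachable at http://localhost:8080 and the Master/Worker processes show up under jps. Note that the test code below uses master("local[*]"), so it actually runs in local mode and does not require the standalone cluster; to run against the cluster started here, you would point the session at the master URL instead. A minimal sketch, where the hostname is an assumption (7077 is the default standalone master port):

from pyspark.sql import SparkSession

# Connect to the standalone master instead of local mode; "localhost" is an assumption
spark = SparkSession.builder.appName("validFieldsCount").master("spark://localhost:7077").getOrCreate()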
Contents of test.csv:
Test code:
# python3
# -*- coding:utf-8 -*-
# @Time: 10/28/19 2:10 AM
# @Author: Damon
# @Software: PyCharm
from pyspark.sql import SparkSession, functions
import time, datetime
spark = SparkSession.builder.appName("validFieldsCount").master("local[*]").getOrCreate()
spark.sparkContext.setLogLevel("WARN")
# In spark.read.csv, header=True means the first row holds the column names; the default header=False
# treats every row as data (no header argument is passed here, so all rows are read as data)
df=spark.read.csv("file:///home/hadoop/PycharmProjects/WordCount/src/traffice_early_warning/test.csv")
df1 = df
df2 = df
df3 = df1.intersect(df2)  # intersect() returns the distinct rows common to both DataFrames
df3.show()                # show() prints the rows itself and returns None, so wrapping it in print() would just print "None"
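If test.csv has a header row, passing header=True (and optionally inferSchema=True) to spark.read.csv gives the DataFrame real column names and typed columns instead of _c0/_c1 strings. A minimal sketch along those lines, reusing the same file path; the header/inferSchema options are assumptions about the data, not something the original code used:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("validFieldsCount").master("local[*]").getOrCreate()

# Read the CSV with a header row and let Spark infer column types (assumed options)
df = spark.read.csv(
    "file:///home/hadoop/PycharmProjects/WordCount/src/traffice_early_warning/test.csv",
    header=True,
    inferSchema=True,
)

# intersect() keeps only the distinct rows present in both DataFrames
common = df.intersect(df)
common.show()          # prints the shared rows
print(common.count())  # number of distinct rows the two DataFrames have in common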