1. Download Spark. I chose spark-1.6.0-bin-hadoop2.4.
2. Move the extracted directory to /usr/local/src/.
3. Edit .bashrc and add:
export SPARK_HOME=/usr/local/src/spark-1.6.0-bin-hadoop2.4
export PATH=$SPARK_HOME/bin:$PATH
export PYTHONPATH=$SPARK_HOME/python:$PYTHONPATH
export PYTHONPATH=$SPARK_HOME/python/lib/py4j-0.9-src.zip:$PYTHONPATH
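A quick sanity check after sourcing .bashrc (a minimal sketch; it only verifies that the interpreter can see the paths set above):

import os
print(os.environ.get('SPARK_HOME'))    # expect /usr/local/src/spark-1.6.0-bin-hadoop2.4
import pyspark                         # raises ImportError if PYTHONPATH is wrong
print(pyspark.__file__)                # should point into the Spark installation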
---------------------------------------------------------------------------------------
[1] Simple example: word count
import os
import sys

# Path for the Spark installation
os.environ['SPARK_HOME'] = "/usr/local/src/spark-1.6.0-bin-hadoop2.4"
# Append pyspark to the Python path
sys.path.append("/usr/local/src/spark-1.6.0-bin-hadoop2.4/python")
sys.path.append("/usr/local/src/spark-1.6.0-bin-hadoop2.4/python/lib/py4j-0.9-src.zip")

try:
    from pyspark import SparkContext
    from pyspark import SparkConf
    print("Successfully imported Spark modules")
except ImportError as e:
    print("Cannot import Spark modules", e)
    sys.exit(1)

from operator import add

# Word count: split each line into words, map each word to (word, 1), then sum by key
sc = SparkContext(appName="myword")
lines = sc.textFile('data_2.txt', 1)
tmp = lines.flatMap(lambda x: x.split(' ')).map(lambda x: (x, 1))
counts = tmp.reduceByKey(add)
output = counts.collect()
for (word, count) in output:
    print("xxx: %s %i" % (word, count))
sc.stop()
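The script can be run with the plain python interpreter, or handed to $SPARK_HOME/bin/spark-submit. To print the most frequent words first, the counts RDD can also be sorted before collecting; a minimal sketch reusing counts from above, placed before sc.stop():

# sort the (word, count) pairs by count, descending, and take the top 10
top10 = counts.sortBy(lambda wc: wc[1], ascending=False).take(10)
for (word, count) in top10:
    print("top: %s %i" % (word, count))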
[2] Sample with a user-defined class
main.py:
# prepare test data and the map function
from class_define import wifi_data, determine_type
data_dict = [('a', wifi_data('a', 1, 2)), ('b', wifi_data('b', 2, 3))]
print('test func', determine_type(data_dict[1][1]))

# prepare Spark
import os
import sys

# Path for the Spark installation
os.environ['SPARK_HOME'] = "/usr/local/src/spark-1.6.0-bin-hadoop2.4"
# Append pyspark to the Python path
sys.path.append("/usr/local/src/spark-1.6.0-bin-hadoop2.4/python")
sys.path.append("/usr/local/src/spark-1.6.0-bin-hadoop2.4/python/lib/py4j-0.9-src.zip")

try:
    from pyspark import SparkContext
    from pyspark import SparkConf
    print("Successfully imported Spark modules")
except ImportError as e:
    print("Cannot import Spark modules", e)
    sys.exit(1)

# ship class_define.py to the workers so they can unpickle wifi_data objects
conf = SparkConf().setAppName("test").setMaster("local[2]")
sc = SparkContext(conf=conf,
                  pyFiles=[os.path.join(os.path.abspath(os.path.dirname(__file__)), 'class_define.py')])
my_rdd = sc.parallelize(data_dict)
print(my_rdd.collect())
result = my_rdd.mapValues(determine_type).collectAsMap()
print('result', result)
sc.stop()
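An alternative to the pyFiles= constructor argument is sc.addPyFile, which ships the dependency after the context is created. A minimal sketch under the same assumption that class_define.py sits next to the driver script:

conf = SparkConf().setAppName("test").setMaster("local[2]")
sc = SparkContext(conf=conf)
# distribute class_define.py to the executors so wifi_data objects can be unpickled there
sc.addPyFile(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'class_define.py'))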
class_define.py:
class wifi_data(object):
    def __init__(self, mac, t, sn):
        self.mac = mac
        self.t = t
        self.sn = sn

def determine_type(wifi_data):
    if wifi_data.t >= 2 and wifi_data.sn >= 2:
        return 'equipment'
    else:
        return 'customer'
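The classifier can also be applied across the whole RDD, for example to count how many records fall into each type. A small sketch that would go in main.py before sc.stop(), reusing my_rdd from above:

# classify every value and count the labels on the driver
type_counts = my_rdd.map(lambda kv: determine_type(kv[1])).countByValue()
print(dict(type_counts))   # with the test data above: {'customer': 1, 'equipment': 1}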