from pyspark import SparkContext

sc = SparkContext("local", "fruit")

# Load the CSV, drop the header row, and keep (category, price) pairs.
data = sc.textFile("products.txt")
header = data.first()
data = data.filter(lambda x: x != header)
fruits = data.map(lambda x: x.split(",")) \
             .map(lambda cols: (cols[3], float(cols[2])))

# Average price per category: accumulate (sum, count) per key, then divide.
avg_price = fruits.aggregateByKey(
    (0.0, 0),
    lambda acc, price: (acc[0] + price, acc[1] + 1),
    lambda acc1, acc2: (acc1[0] + acc2[0], acc1[1] + acc2[1]),
).mapValues(lambda x: x[0] / x[1])

for category, price in avg_price.collect():
    print("Category:", category, "\tAverage Price:", price)
from pyspark.sql import SparkSession
from pyspark.sql.functions import avg, count

spark = SparkSession.builder.appName("Scores Analysis").getOrCreate()
data = spark.read.csv("exam.txt", header=True, inferSchema=True)

# Per-city student count and mean score, keeping cities averaging above 85.
city_stats = data.groupBy("city").agg(
    count("*").alias("num_students"),
    avg("score").alias("avg_score"),
)
top_cities = city_stats.filter("avg_score > 85")
top_cities.show()
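# The same query expressed in Spark SQL, for reference; like the code
# above, it assumes exam.txt's header provides "city" and "score" columns.
data.createOrReplaceTempView("exam")
spark.sql("""
    SELECT city, COUNT(*) AS num_students, AVG(score) AS avg_score
    FROM exam
    GROUP BY city
    HAVING AVG(score) > 85
""").show()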
spark.stop()
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import col, count, first, regexp_replace, split
from pyspark.sql.functions import max as max_

spark = SparkSession.builder.appName("IP Analysis").getOrCreate()

# Structured Streaming file source: watch a directory for new log files.
# (The original mixed the old DStream API with DataFrame operations, which
# cannot work; textFileStream also expects a directory, not a single file.)
lines = spark.readStream.text("web_logs")

# Split each line on spaces; the field positions below assume a
# common-log-style layout: ip ... [timestamp] "METHOD /path HTTP/1.1" status.
parts = split(col("value"), " ")
logs_df = lines.select(
    parts.getItem(0).alias("ip"),
    regexp_replace(parts.getItem(3), r'^[\["]', "").alias("timestamp"),
    regexp_replace(parts.getItem(5), r'^"', "").alias("method"),
    parts.getItem(6).alias("path"),
    parts.getItem(8).alias("status"),
)

# Count GET requests per (timestamp, ip).
requests = logs_df.filter(col("method") == "GET") \
    .groupBy("timestamp", "ip") \
    .agg(count("*").alias("num_requests"))
requests.printSchema()

# Window functions are not supported on streaming DataFrames, so the
# busiest IP per timestamp is ranked on each micro-batch's full result
# table via foreachBatch (complete output mode re-emits the whole table).
def busiest_ip(batch_df, batch_id):
    w = Window.partitionBy("timestamp")
    batch_df.withColumn("max_requests", max_("num_requests").over(w)) \
        .where("num_requests = max_requests") \
        .groupBy("timestamp") \
        .agg(first("ip").alias("ip"), first("max_requests").alias("max_requests")) \
        .show(truncate=False)

query = requests.writeStream.outputMode("complete").foreachBatch(busiest_ip).start()
query.awaitTermination()
spark.stop()
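# To exercise the stream locally: start this job, then copy completed log
# files into the watched web_logs/ directory (a hypothetical path); the
# file source only picks up files that appear after the query starts.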
from pyspark.sql import SparkSession
from pyspark.ml.feature import StringIndexer, VectorAssembler
spark = SparkSession.builder.appName("mushrooms").master("local[2]").getOrCreate()
df1 = spark.read.csv("mushrooms.csv", header=True, inferSchema=True)
df1.show(10)
# Index every categorical column: each feature column c becomes the numeric
# "new_c", and the target column "class" becomes "label". This defines the
# df2 the assembler below consumes.
indexer = StringIndexer() \
    .setInputCols(df1.columns) \
    .setOutputCols(["label" if c == "class" else "new_" + c for c in df1.columns])
df2 = indexer.fit(df1).transform(df1)

# Assemble the indexed feature columns into a single vector column.
vectorAssembler = VectorAssembler() \
    .setInputCols(["new_" + c for c in df1.columns if c != "class"]) \
    .setOutputCol("features")
df3 = vectorAssembler.transform(df2).select("features", "label")
df3.show(10)
(trainingData, testData) = df3.randomSplit([0.7, 0.3], seed=2023)
from pyspark.ml.classification import RandomForestClassifier
rf = RandomForestClassifier().setLabelCol("label").setFeaturesCol("features").setNumTrees(10)
model = rf.fit(trainingData)
predictions = model.transform(testData)
predictions.select("label", "probability", "prediction").show(2)
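# A follow-up not in the original listing: score the held-out split with
# the standard MulticlassClassificationEvaluator.
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy")
print("Test accuracy:", evaluator.evaluate(predictions))
spark.stop()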