1 The Maven dependencies are as follows
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>1.5.1</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-mllib_2.11</artifactId>
    <version>1.5.1</version>
</dependency>
2 Format of the input data
Each line is one transaction; items within a transaction are separated by single spaces.
a b c
a b
a b c d
a c d
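With minSupport = 0.5 and the four transactions above, an itemset is frequent only if it appears in at least two of them. The following plain-Scala sketch (independent of the Spark job; the object name SupportCheck and the candidate itemsets are illustrative, not part of the original code) counts support by hand:

object SupportCheck {
  def main(args: Array[String]): Unit = {
    // The four sample transactions from the data section above.
    val transactions = Seq(
      Set("a", "b", "c"),
      Set("a", "b"),
      Set("a", "b", "c", "d"),
      Set("a", "c", "d")
    )
    val minSupport = 0.5

    // Fraction of transactions that contain every item of the candidate itemset.
    def support(itemset: Set[String]): Double =
      transactions.count(t => itemset.subsetOf(t)).toDouble / transactions.size

    Seq(Set("a"), Set("a", "b"), Set("b", "d")).foreach { s =>
      val sup = support(s)
      val label = if (sup >= minSupport) "frequent" else "not frequent"
      println(s.mkString("{", ",", "}") + " support = " + sup + " (" + label + ")")
    }
  }
}

For example, {a,b} occurs in 3 of the 4 transactions (support 0.75, frequent), while {b,d} occurs in only 1 (support 0.25, filtered out).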
3 The association-rule code is as follows
import org.apache.log4j.{Level, Logger}
import org.apache.spark.mllib.fpm.FPGrowth
import org.apache.spark.{SparkConf, SparkContext}

object fpg {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("AssociationRules").setMaster("local[2]")
    val sc = new SparkContext(conf)
    Logger.getRootLogger.setLevel(Level.WARN)

    // Each line of the input file is one transaction; items are space-separated.
    val data = sc.textFile("f:/data/item.txt", 10)
    val transactions = data.map(_.split(" ")).cache()

    val minSupport = 0.5    // an itemset must appear in at least 50% of transactions
    val minConfidence = 0.8 // minimum confidence for the generated rules
    val numPartitions = 10
    val model = new FPGrowth()
      .setMinSupport(minSupport)
      .setNumPartitions(numPartitions)
      .run(transactions)

    println("Frequent itemsets ----------------")
    model.freqItemsets.collect().foreach { itemset =>
      // freq is the number of transactions containing this itemset
      println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
    }

    println("Association rules ---------------")
    // generateAssociationRules expects a minimum *confidence*, not the support
    model.generateAssociationRules(minConfidence).collect().foreach { rule =>
      println(rule.antecedent.mkString("[", ",", "]") + " => " +
        rule.consequent.mkString("[", ",", "]") + ", " + rule.confidence)
    }

    sc.stop()
  }
}
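For each rule X => Y built from the frequent itemsets, generateAssociationRules reports confidence(X => Y) = support(X ∪ Y) / support(X) and keeps the rule only if that value reaches the threshold. A small hand check against the sample data (the object name ConfidenceCheck is illustrative) shows one rule that survives a 0.8 threshold and one that is filtered:

object ConfidenceCheck {
  def main(args: Array[String]): Unit = {
    // The four sample transactions from the data section above.
    val transactions = Seq(
      Set("a", "b", "c"),
      Set("a", "b"),
      Set("a", "b", "c", "d"),
      Set("a", "c", "d")
    )

    // Number of transactions containing every item of the given itemset.
    def count(itemset: Set[String]): Int =
      transactions.count(t => itemset.subsetOf(t))

    // confidence(X => Y) = support(X union Y) / support(X)
    def confidence(x: Set[String], y: Set[String]): Double =
      count(x ++ y).toDouble / count(x)

    println("[d] => [c]: " + confidence(Set("d"), Set("c"))) // 2/2 = 1.0, kept at 0.8
    println("[c] => [b]: " + confidence(Set("c"), Set("b"))) // 2/3 ~ 0.67, filtered out
  }
}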