Given a dataset of transactions, the first step of FP-growth is to count item frequencies and identify the frequent items. Unlike Apriori-like algorithms designed for the same purpose, the second step of FP-growth encodes the transactions in a compact prefix-tree structure (the FP-tree) without explicitly generating candidate sets, which are usually expensive to produce. After the second step, the frequent itemsets can be extracted directly from the FP-tree.
import org.apache.spark.sql.SparkSession
import org.apache.spark.mllib.fpm.FPGrowth
import org.apache.spark.rdd.RDD

val spark = SparkSession
  .builder()
  .appName("Spark SQL basic example")
  .config("spark.some.config.option", "some-value")
  .getOrCreate()

// For implicit conversions like converting RDDs to DataFrames
import spark.implicits._

// Three transactions, each stored as a comma-separated string
val data = List(
  "1,2,5",
  "1,2,3,5",
  "1,2").toDF("items")
data: org.apache.spark.sql.DataFrame = [items: string]

// Note the square brackets at the head and tail of each row
data.rdd.map { s => s.toString() }.collect().take(3)
res20: Array[String] = Array([1,2,5], [1,2,3,5], [1,2])

// Strip the surrounding brackets and split each row into an array of items
val transactions: RDD[Array[String]] = data.rdd.map { s =>
  val str = s.toString().drop(1).dropRight(1)
  str.trim().split(",")
}

// Run FP-growth with minimum support 0.5 over 8 partitions
val fpg = new FPGrowth().setMinSupport(0.5).setNumPartitions(8)
val model = fpg.run(transactions)

/* model.freqItemsets.collect().foreach { itemset =>
  println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
} */

// Collect the frequent itemsets into a DataFrame of (items, frequency)
val freqItemSets = model.freqItemsets.map { itemset =>
  val items = itemset.items.mkString(",")
  val freq = itemset.freq
  (items, freq)
}.toDF("items", "freq")
freqItemSets: org.apache.spark.sql.DataFrame = [items: string, freq: bigint]

freqItemSets.show
+-----+----+
|items|freq|
+-----+----+
|    1|   3|
|    2|   3|
|  2,1|   3|
|    5|   2|
|  5,2|   2|
|5,2,1|   2|
|  5,1|   2|
+-----+----+

val minConfidence = 0.6
minConfidence: Double = 0.6

/* model.generateAssociationRules(minConfidence).collect().foreach { rule =>
  println(
    rule.antecedent.mkString("[", ",", "]")
      + " => " + rule.consequent.mkString("[", ",", "]")
      + ", " + rule.confidence)
} */

// Generate association rules that satisfy the minimum confidence
val Rules = model.generateAssociationRules(minConfidence)
Rules: org.apache.spark.rdd.RDD[org.apache.spark.mllib.fpm.AssociationRules.Rule[String]] = MapPartitionsRDD[129] at filter at AssociationRules.scala:80

// Flatten each rule into (antecedent, consequent, confidence)
val df = Rules.map { s =>
  val L = s.antecedent.mkString(",")
  val R = s.consequent.mkString(",")
  val confidence = s.confidence
  (L, R, confidence)
}.toDF("left_collect", "right_collect", "confidence")
df: org.apache.spark.sql.DataFrame = [left_collect: string, right_collect: string ... 1 more field]

df.show
+------------+-------------+------------------+
|left_collect|right_collect|        confidence|
+------------+-------------+------------------+
|           2|            5|0.6666666666666666|
|           2|            1|               1.0|
|         5,2|            1|               1.0|
|           5|            2|               1.0|
|           5|            1|               1.0|
|           1|            5|0.6666666666666666|
|           1|            2|               1.0|
|         2,1|            5|0.6666666666666666|
|         5,1|            2|               1.0|
+------------+-------------+------------------+

The confidence of a rule X => Y is freq(X ∪ Y) / freq(X); for example, 2 => 5 has confidence 2/3 ≈ 0.667 because {5, 2} appears in 2 of the 3 transactions that contain {2}.
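For comparison, the same pipeline can also be expressed with the DataFrame-based API in org.apache.spark.ml.fpm.FPGrowth (available since Spark 2.2), which takes an array-typed items column directly, so the bracket-stripping step above is not needed. The following is a minimal sketch, assuming the same three transactions and the same support/confidence thresholds; the application name and variable names are illustrative only.

import org.apache.spark.ml.fpm.FPGrowth
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("fpgrowth-ml-sketch").getOrCreate()
import spark.implicits._

// Each transaction is already an Array[String]; no string parsing is needed
val transactionsDF = Seq(
  Array("1", "2", "5"),
  Array("1", "2", "3", "5"),
  Array("1", "2")
).toDF("items")

val fpgrowth = new FPGrowth()
  .setItemsCol("items")
  .setMinSupport(0.5)
  .setMinConfidence(0.6)

val model = fpgrowth.fit(transactionsDF)

// Frequent itemsets and association rules are returned as DataFrames
model.freqItemsets.show()
model.associationRules.show()

// transform() adds a prediction column holding the consequents of rules
// whose antecedents are contained in each input transaction
model.transform(transactionsDF).show()

With minSupport 0.5 and minConfidence 0.6 this sketch should reproduce the itemsets and rules shown above, only represented as array columns rather than comma-joined strings.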