# Market basket analysis
import numpy as np
import pandas as pd
# Load the raw basket data: each row is one transaction, items spread
# across the columns (no header row in the file).
# NOTE(review): path is machine-specific — consider making it configurable.
data = pd.read_csv("C:\\Users\\Administrator\\Desktop\\heyang_hanyu.csv",
                   header=None, encoding="gbk")
print(data.head())

# Convert the frame into a list of transactions (lists of item strings).
# Use the frame's actual dimensions instead of the hard-coded 500 x 16 so
# the script keeps working when the CSV grows or shrinks.
n_rows, n_cols = data.shape
trans = [[str(data.values[i, j]) for j in range(n_cols)]
         for i in range(n_rows)]
trans = np.array(trans)
print(trans.shape)
# One-hot encode the transactions into a boolean item matrix
# (pandas is already imported at the top of the file; the duplicate
# `import pandas as pd` was dropped).
from mlxtend.preprocessing import TransactionEncoder

te = TransactionEncoder()
onehot = te.fit_transform(trans)
onehot = pd.DataFrame(onehot, columns=te.columns_)
print(onehot.shape)

# Drop the "nan" column produced by padding shorter transactions;
# errors="ignore" keeps this from raising when no empty cells exist.
df = onehot.drop(columns=["nan"], errors="ignore")
print(df.head())
from mlxtend.frequent_patterns import apriori

# Mine all itemsets with at least 0.5% support. The original ran the
# identical apriori call twice (for df1 and frequent_itemsets) — one pass
# is enough; df1 below is derived from the same result.
frequent_itemsets = apriori(df, min_support=0.005, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(len)

# All itemsets, highest support first.
df1 = frequent_itemsets.sort_values(["support"], ascending=False)
print(df1)

# How many itemsets of each length (1/2/3, ...) were found.
print(frequent_itemsets["length"].value_counts())

# Item pairs (length == 2), highest support first.
df2 = frequent_itemsets[(frequent_itemsets['length'] == 2)
                        & (frequent_itemsets['support'] >= 0.005)]
print(df2.sort_values(["support"], ascending=False))

# Item triples (length == 3).
print(frequent_itemsets[(frequent_itemsets['length'] == 3)
                        & (frequent_itemsets['support'] >= 0.005)])
from mlxtend.frequent_patterns import association_rules

# Derive rules with confidence >= 0.9; `metric` accepts any column name
# of the returned rules table as an alternative filter measure.
rules = association_rules(frequent_itemsets,
                          metric='confidence',
                          min_threshold=0.9)
# Rank the rules by leverage, strongest first.
association_rule = rules.sort_values(by='leverage', ascending=False)
print(association_rule)
# Done