下面给出python版的Apriori算法:
'''
@author: Peter
'''
from numpy import *
def loadDataSet():
    """Return a small hard-coded transaction database for demonstration."""
    transactions = [
        [1, 3, 4],
        [2, 3, 5],
        [1, 2, 3, 5],
        [2, 5],
    ]
    return transactions
def createC1(dataSet):
    """Build the sorted list of candidate 1-itemsets from the transactions.

    Each candidate is a frozenset so it can later be used as a dict key
    (ordinary sets are unhashable).
    """
    singles = []
    for transaction in dataSet:
        for item in transaction:
            if [item] not in singles:
                singles.append([item])
    singles.sort()
    return [frozenset(candidate) for candidate in singles]
def scanD(D, Ck, minSupport):
    """Count candidate itemsets over the transactions and filter by support.

    Parameters
    ----------
    D : list of set
        The transaction database (each transaction as a set of items).
    Ck : list of frozenset
        Candidate k-itemsets to count.
    minSupport : float
        Minimum support threshold in [0, 1].

    Returns
    -------
    (retList, supportData)
        retList: candidates whose support >= minSupport.
        supportData: dict mapping every counted candidate to its support
        (including those below the threshold).
    """
    ssCnt = {}
    for tid in D:
        for can in Ck:
            if can.issubset(tid):
                # dict.get replaces the separate membership test + branch
                ssCnt[can] = ssCnt.get(can, 0) + 1
    numItems = float(len(D))
    retList = []
    supportData = {}
    for key, cnt in ssCnt.items():
        support = cnt / numItems
        if support >= minSupport:
            # prepend to preserve the original (reversed) result ordering
            retList.insert(0, key)
        supportData[key] = support
    return retList, supportData
def aprioriGen(Lk, k):
    """Join frequent (k-1)-itemsets to generate candidate k-itemsets Ck.

    Two itemsets are merged only when their first k-2 elements (sorted)
    agree; this guarantees every candidate is produced exactly once.
    For k == 2 the prefixes are empty, so every pair is joined.
    """
    candidates = []
    count = len(Lk)
    for i in range(count):
        prefix_i = sorted(list(Lk[i])[:k - 2])  # loop-invariant, hoisted
        for j in range(i + 1, count):
            prefix_j = sorted(list(Lk[j])[:k - 2])
            if prefix_i == prefix_j:
                candidates.append(Lk[i] | Lk[j])  # set union
    return candidates
def apriori(dataSet, minSupport = 0.5):
    """Run the Apriori algorithm over a transaction database.

    Returns (L, supportData): L is a list of levels, where L[k-1] holds the
    frequent k-itemsets, and supportData maps every counted itemset to its
    support.
    """
    D = [set(transaction) for transaction in dataSet]
    L1, supportData = scanD(D, createC1(dataSet), minSupport)
    L = [L1]
    k = 2
    # Grow itemsets one level at a time until a level comes back empty.
    while L[-1]:
        Ck = aprioriGen(L[-1], k)
        Lk, supK = scanD(D, Ck, minSupport)  # scan DB to get Lk
        supportData.update(supK)
        L.append(Lk)
        k += 1
    return L, supportData
对于频繁项集{0,1,2,3}的关联规则的网格示意图
阴影区域给出的是低置信度的规则。如果发现 {0,1,2} -> {3} 是一条低置信度规则,那么所有其它以 {0,1,2} 的子集做前件、以 3 作为后件的规则的置信度也会较低。
def generateRules(L, supportData, minConf=0.7):
    """Mine association rules from the frequent itemsets.

    supportData is the dict produced by scanD/apriori. Only itemsets with
    two or more items can form rules, so level 0 (1-itemsets) is skipped.
    Returns the list of (antecedent, consequent, confidence) tuples.
    """
    bigRuleList = []
    for i, level in enumerate(L):
        if i < 1:
            continue  # 1-itemsets cannot be split into a rule
        for freqSet in level:
            singles = [frozenset([item]) for item in freqSet]
            if i > 1:
                rulesFromConseq(freqSet, singles, supportData, bigRuleList, minConf)
            else:
                calcConf(freqSet, singles, supportData, bigRuleList, minConf)
    return bigRuleList
def calcConf(freqSet, H, supportData, brl, minConf=0.7):
    """Test each candidate consequent in H against the confidence threshold.

    Rules that pass are printed and appended to brl as
    (antecedent, consequent, confidence); the surviving consequents are
    returned for further merging.
    """
    prunedH = []
    for conseq in H:
        antecedent = freqSet - conseq
        conf = supportData[freqSet] / supportData[antecedent]
        if conf < minConf:
            continue  # guard clause: skip low-confidence rules
        print(antecedent, '-->', conseq, ', conf:', conf)
        brl.append((antecedent, conseq, conf))
        prunedH.append(conseq)
    return prunedH
def rulesFromConseq(freqSet, H, supportData, brl, minConf=0.7):
    """Recursively generate rules from freqSet with growing consequents.

    BUG FIX vs. the book version: the original jumped straight to merging
    H into (m+1)-item consequents, so rules whose consequent has exactly m
    items (e.g. single-item consequents from 3-itemsets) were never
    evaluated. Here H is scored first, and only the consequents that pass
    minConf are merged and recursed on (Apriori pruning of rules).
    """
    m = len(H[0])
    if len(freqSet) > m:  # a non-empty antecedent must remain
        # Evaluate the current consequent size first (missing in original).
        Hmp1 = calcConf(freqSet, H, supportData, brl, minConf)
        if len(Hmp1) > 1 and len(freqSet) > (m + 1):
            # Need at least two survivors to merge into (m+1)-item consequents.
            Hmp1 = aprioriGen(Hmp1, m + 1)
            rulesFromConseq(freqSet, Hmp1, supportData, brl, minConf)
def pntRules(ruleList, itemMeaning):
    """Pretty-print rules, translating item ids via the itemMeaning mapping."""
    for antecedent, consequent, confidence in ruleList:
        for item in antecedent:
            print(itemMeaning[item])
        print(" -------->")
        for item in consequent:
            print(itemMeaning[item])
        print("confidence: %f" % confidence)
        print()  # blank line between rules
# --- Demo driver ---------------------------------------------------------
dataset = loadDataSet()
print("dataset:", dataset, "\n")

# Mine the frequent itemsets.
ms = 0.5
L, suppData = apriori(dataset, minSupport=ms)

# Show the support of every itemset that was counted. frozenset is used
# as the dict key because ordinary sets are unhashable.
print("查看所有项集的支持度:")
for key, value in suppData.items():
    print(key, ":", value)
print()

# Show the frequent itemsets, level by level.
print("满足最小支持度%.2f的频繁项集:" % ms)
for size, level in enumerate(L, start=1):
    print("%d -项集: " % size)
    if level:
        for itemset in level:
            print(itemset)
    else:
        print(None)
    print()

# Mine the association rules from the frequent itemsets.
conf = 0.7
print("支持度不小于%.2f的频繁项集中,满足最小置信度%.2f的关联规则:" % (ms, conf))
rules = generateRules(L, suppData, minConf=conf)
对于简单数据集 [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]],输出结果如下:
可以看到,前两条包含2和5的规则可以互换前件和后件,但是后一条包含1和3的规则不行。
本文分享自 Python可视化编程机器学习OpenCV 微信公众号,前往查看
如有侵权,请联系 cloudcommunity@tencent.com 删除。
本文参与 腾讯云自媒体同步曝光计划 ,欢迎热爱写作的你一起参与!