# -*- coding: utf-8 -*-

# Code 8-1: Inspect the data

import numpy as np
import pandas as pd

inputfile = r'C:\Users\86184\Desktop\文件集\data\GoodsOrder.csv'  # input data file
data = pd.read_csv(inputfile, encoding='gbk')  # read the data
data.info()  # show column types and non-null counts

data = data['id']
description = [data.count(), data.min(), data.max()]  # count, minimum and maximum, in that order
description = pd.DataFrame(description, index=['Count', 'Min', 'Max']).T  # put the results in a DataFrame
print('Descriptive statistics:\n', np.round(description))  # print the result
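For a quick cross-check, pandas can report the same numbers directly; a minimal sketch, assuming `data` still refers to the 'id' column as above:

# describe() already includes count, min and max (plus mean, std and quartiles)
print(data.describe()[['count', 'min', 'max']])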
# Code 8-2: Analyse the best-selling goods

# Sales volume and share of the 10 best-selling goods
import pandas as pd

inputfile = r'C:\Users\86184\Desktop\文件集\data\GoodsOrder.csv'  # input data file
data = pd.read_csv(inputfile, encoding='gbk')  # read the data
group = data.groupby(['Goods']).count().reset_index()  # count the order lines per good
sorted_goods = group.sort_values('id', ascending=False)  # renamed so the built-in sorted() is not shadowed
print('Top 10 goods by sales volume:\n', sorted_goods[:10])  # the 10 best-selling goods

# Horizontal bar chart of the 10 best-selling goods
import matplotlib.pyplot as plt
x = sorted_goods[:10]['Goods']
y = sorted_goods[:10]['id']
plt.figure(figsize=(10, 6))  # set the figure size
plt.barh(x, y)
plt.rcParams['font.sans-serif'] = 'SimHei'  # SimHei so the Chinese goods names render correctly
plt.xlabel('Sales volume')  # x-axis label
plt.ylabel('Goods category')  # y-axis label
plt.title('Top 10 goods by sales volume, num = 3013', fontsize=20)  # title
plt.savefig(r'C:\Users\86184\Desktop\文件集\data\top10.png')  # save the figure as a .png file
plt.show()  # display the figure

# Share of total sales for the 10 best-selling goods
data_nums = data.shape[0]
for _, row in sorted_goods[:10].iterrows():
    print(row['Goods'], row['id'], row['id'] / data_nums)
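The per-row loop above prints the shares one by one; the same table can be produced in a vectorized way. A minimal sketch, assuming `sorted_goods` and `data_nums` are defined as in Code 8-2:

top10 = sorted_goods[:10].copy()
top10['percent'] = top10['id'] / data_nums  # share of each good among all order lines
print(top10[['Goods', 'id', 'percent']])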
# Code 8-3: Sales volume and share of each goods category

import pandas as pd

inputfile1 = r'C:\Users\86184\Desktop\文件集\data\GoodsOrder.csv'
inputfile2 = r'C:\Users\86184\Desktop\文件集\data\GoodsTypes.csv'
data = pd.read_csv(inputfile1, encoding='gbk')
types = pd.read_csv(inputfile2, encoding='gbk')  # read the data

group = data.groupby(['Goods']).count().reset_index()
sort = group.sort_values('id', ascending=False).reset_index()
data_nums = data.shape[0]  # total number of order lines
del sort['index']

sort_links = pd.merge(sort, types)  # merge the two DataFrames on their common 'Goods' column
# Sum the counts within each category, then sort
sort_link = sort_links.groupby(['Types']).sum().reset_index()
sort_link = sort_link.sort_values('id', ascending=False).reset_index()
del sort_link['index']  # drop the 'index' column

# Compute the percentage, rename the column and write the result to a file
sort_link['count'] = sort_link.apply(lambda line: line['id'] / data_nums, axis=1)
sort_link.rename(columns={'count': 'percent'}, inplace=True)
print('Sales volume and share of each category:\n', sort_link)
outfile1 = r'C:\Users\86184\Desktop\文件集\data\percent.csv'
sort_link.to_csv(outfile1, index=False, header=True, encoding='gbk')  # save the result

# Pie chart of the sales share of each category
import matplotlib.pyplot as plt
data = sort_link['percent']
labels = sort_link['Types']
plt.figure(figsize=(10, 8))  # set the figure size
plt.pie(data, labels=labels, autopct='%1.2f%%')
plt.rcParams['font.sans-serif'] = 'SimHei'  # SimHei so the Chinese category names render correctly
plt.title('Sales share of each goods category, num = 3013', fontsize=20)  # title
plt.savefig(r'C:\Users\86184\Desktop\文件集\data\persent.png')  # save the figure as a .png file
plt.show()
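The category shares computed above can also be cross-checked in two lines with a merge and value_counts; a minimal sketch that re-reads the order table into a fresh variable (the names `orders` and `category_share` are illustrative) so the reassignment of `data` above is not disturbed:

orders = pd.read_csv(inputfile1, encoding='gbk')
category_share = orders.merge(types, on='Goods')['Types'].value_counts(normalize=True)  # fraction of order lines per category
print(category_share)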
# Code 8-4: Sales volume and share of goods within the non-alcoholic drinks category

# Select the goods whose category is '非酒精饮料' (non-alcoholic drinks), compute their shares and save the result
selected = sort_links.loc[sort_links['Types'] == '非酒精饮料']  # the filter value must match the data, so it stays in Chinese
child_nums = selected['id'].sum()  # total sales of all non-alcoholic drinks
selected['child_percent'] = selected.apply(lambda line: line['id'] / child_nums, axis=1)  # share within the category
selected.rename(columns={'id': 'count'}, inplace=True)
print('Sales volume and share within the non-alcoholic drinks category:\n', selected)
outfile2 = r'C:\Users\86184\Desktop\文件集\data\child_percent.csv'
selected.to_csv(outfile2, index=False, header=True, encoding='gbk')  # save the result

# Pie chart of the sales share of each good within the non-alcoholic drinks category
import matplotlib.pyplot as plt
data = selected['child_percent']
labels = selected['Goods']
plt.figure(figsize=(10, 8))  # set the figure size
explode = (0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.08, 0.3, 0.1, 0.3)  # gap between wedges, one value per good
plt.pie(data, explode=explode, labels=labels, autopct='%1.2f%%',
        pctdistance=1.1, labeldistance=1.2)
plt.rcParams['font.sans-serif'] = 'SimHei'  # SimHei so the Chinese goods names render correctly
plt.title('Sales share within the non-alcoholic drinks category, num = 3013', fontsize=20)  # title
plt.axis('equal')  # draw the pie as a circle
plt.savefig(r'C:\Users\86184\Desktop\文件集\data\child_persent.png')  # save the figure
plt.show()  # display the figure
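Assigning new columns to a .loc slice, as done above, can trigger pandas' SettingWithCopyWarning; a minimal sketch of an equivalent, vectorized variant that copies the slice first, assuming `sort_links` is the merged table from Code 8-3:

selected = sort_links.loc[sort_links['Types'] == '非酒精饮料'].copy()  # explicit copy avoids SettingWithCopyWarning
selected['child_percent'] = selected['id'] / selected['id'].sum()      # share within the category, without apply()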
# -*- coding: utf-8 -*-

# Code 8-5: Transform the data

import pandas as pd

inputfile = r'C:\Users\86184\Desktop\文件集\data\GoodsOrder.csv'
data = pd.read_csv(inputfile, encoding='gbk')

# Merge the 'Goods' column per id, separating the goods with ','
data['Goods'] = data['Goods'].apply(lambda x: ',' + x)
data = data.groupby('id').sum().reset_index()

# Convert the merged goods string into a one-element list (dropping the leading ',')
data['Goods'] = data['Goods'].apply(lambda x: [x[1:]])
data_list = list(data['Goods'])

# Split each goods string into its individual items
data_translation = []
for i in data_list:
    p = i[0].split(',')
    data_translation.append(p)
print('First 5 elements of the transformed data:\n', data_translation[0:5])
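The same list of transactions can be built without the string-concatenation round trip by grouping the goods straight into lists; a minimal sketch, assuming GoodsOrder.csv is loaded as above:

orders = pd.read_csv(inputfile, encoding='gbk')
data_translation = orders.groupby('id')['Goods'].apply(list).tolist()  # one list of goods per order id
print('First 5 transactions:\n', data_translation[:5])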
# Code 8-6: Build the association rule model

from numpy import *

def loadDataSet():
    return [['a', 'c', 'e'], ['b', 'd'], ['b', 'c'], ['a', 'b', 'c', 'd'], ['a', 'b'], ['b', 'c'], ['a', 'b'],
            ['a', 'b', 'c', 'e'], ['a', 'b', 'c'], ['a', 'c', 'e']]

def createC1(dataSet):
    C1 = []
    for transaction in dataSet:
        for item in transaction:
            if not [item] in C1:
                C1.append([item])
    C1.sort()
    # Map to frozenset: immutable and hashable, so the itemsets can be used as dictionary keys
    return list(map(frozenset, C1))

# From candidate k-itemsets to frequent k-itemsets (support computation)
def scanD(D, Ck, minSupport):
    ssCnt = {}
    for tid in D:  # iterate over the transactions
        for can in Ck:  # iterate over the candidates
            if can.issubset(tid):  # does the transaction contain the candidate?
                if not can in ssCnt:
                    ssCnt[can] = 1  # first occurrence: set the count to 1
                else:
                    ssCnt[can] += 1  # otherwise increment the count
    numItems = float(len(D))  # number of transactions
    retList = []  # initialise Lk
    supportData = {}  # support of each candidate
    for key in ssCnt:
        support = ssCnt[key] / numItems  # compute the support
        if support >= minSupport:
            retList.insert(0, key)  # keep the candidates that meet the threshold
        supportData[key] = support
    return retList, supportData

def calSupport(D, Ck, min_support):
    dict_sup = {}
    for i in D:
        for j in Ck:
            if j.issubset(i):
                if not j in dict_sup:
                    dict_sup[j] = 1
                else:
                    dict_sup[j] += 1
    sumCount = float(len(D))
    supportData = {}
    relist = []
    for i in dict_sup:
        temp_sup = dict_sup[i] / sumCount
        if temp_sup >= min_support:
            relist.append(i)
        # this can be changed to return the support of all candidates, or only of the frequent itemsets
        supportData[i] = temp_sup
    return relist, supportData

# Candidate generation with improved pruning
def aprioriGen(Lk, k):
    retList = []
    lenLk = len(Lk)
    for i in range(lenLk):
        for j in range(i + 1, lenLk):  # pairwise combinations
            L1 = list(Lk[i])[:k - 2]
            L2 = list(Lk[j])[:k - 2]
            L1.sort()
            L2.sort()
            if L1 == L2:  # if the first k-2 items are equal the union can be taken; this prevents duplicates
                # Pruning (a is a candidate k-itemset, b holds all of its (k-1)-subsets)
                a = Lk[i] | Lk[j]  # a is a frozenset
                a1 = list(a)
                b = []
                # Remove each element in turn from a1 and collect the resulting (k-1)-subsets in b
                for q in range(len(a1)):
                    t = [a1[q]]
                    tt = frozenset(set(a1) - set(t))
                    b.append(tt)
                t = 0
                for w in b:
                    # Keep a only if every (k-1)-subset in b is frequent (i.e. appears in Lk); otherwise discard it
                    if w in Lk:
                        t += 1
                if t == len(b):
                    retList.append(b[0] | b[1])
    return retList

def apriori(dataSet, minSupport=0.2):
    # The first three statements find the frequent 1-itemsets
    C1 = createC1(dataSet)
    D = list(map(set, dataSet))  # convert each transaction to a set
    L1, supportData = calSupport(D, C1, minSupport)
    L = [L1]  # wrap in a list so the 1-itemsets form a single element of L
    k = 2
    while (len(L[k - 2]) > 0):  # while there are still frequent itemsets to extend
        Ck = aprioriGen(L[k - 2], k)
        Lk, supK = scanD(D, Ck, minSupport)  # scan the DB to get Lk
        supportData.update(supK)  # merge the key/value pairs of supK into supportData
        L.append(Lk)  # the last element of L will be an empty list
        k += 1
    del L[-1]  # remove the trailing empty list
    return L, supportData  # L is the list of frequent itemsets; 1-, 2-, 3-itemsets are separate elements

# Generate all proper subsets of a set
def getSubset(fromList, toList):
    for i in range(len(fromList)):
        t = [fromList[i]]
        tt = frozenset(set(fromList) - set(t))
        if not tt in toList:
            toList.append(tt)
            tt = list(tt)
            if len(tt) > 1:
                getSubset(tt, toList)

def calcConf(freqSet, H, supportData, ruleList, minConf=0.7):
    for conseq in H:  # iterate over every candidate consequent in H and compute its confidence
        conf = supportData[freqSet] / supportData[freqSet - conseq]  # confidence from the support data
        # lift = p(a & b) / (p(a) * p(b))
        lift = supportData[freqSet] / (supportData[conseq] * supportData[freqSet - conseq])

        if conf >= minConf and lift > 1:
            print(freqSet - conseq, '-->', conseq, 'support:', round(supportData[freqSet], 6),
                  'confidence:', round(conf, 6), 'lift:', round(lift, 6))
            ruleList.append((freqSet - conseq, conseq, conf))

# Generate the rules
def gen_rule(L, supportData, minConf=0.7):
    bigRuleList = []
    for i in range(1, len(L)):  # start from the 2-itemsets
        for freqSet in L[i]:  # freqSet runs over all frequent k-itemsets
            # Compute all non-empty proper subsets of freqSet (1-itemsets up to (k-1)-itemsets),
            # collected in all_subset as a list of frozensets
            H1 = list(freqSet)
            all_subset = []
            getSubset(H1, all_subset)  # generate all subsets
            calcConf(freqSet, all_subset, supportData, bigRuleList, minConf)
    return bigRuleList

if __name__ == '__main__':
    dataSet = data_translation  # the transaction list built in Code 8-5
    L, supportData = apriori(dataSet, minSupport=0.02)
    rule = gen_rule(L, supportData, minConf=0.35)
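As a cross-check of the hand-written Apriori above, the same frequent itemsets and rules can be mined with the third-party mlxtend package; a minimal sketch, assuming mlxtend is installed and `data_translation` comes from Code 8-5 (the import is aliased so it does not shadow the apriori function defined above):

import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori as mlx_apriori, association_rules

te = TransactionEncoder()
basket = pd.DataFrame(te.fit(data_translation).transform(data_translation), columns=te.columns_)  # one-hot encode the transactions

freq_itemsets = mlx_apriori(basket, min_support=0.02, use_colnames=True)  # same support threshold as above
rules = association_rules(freq_itemsets, metric='confidence', min_threshold=0.35)
print(rules.loc[rules['lift'] > 1, ['antecedents', 'consequents', 'support', 'confidence', 'lift']])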