
E-commerce Website Behavior Analysis and Service Recommendation


# -*- coding: utf-8 -*-

# Code 11-1

import os
import pandas as pd


# # Change the working directory to the target folder
# os.chdir("./")

# # Connection method 1: a SQLAlchemy engine
from sqlalchemy import create_engine

engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/test?charset=utf8', echo=True)
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)

# Connection method 2: pymysql
# import pymysql as pm

# con = pm.connect(
#     host='localhost',
#     user='root',
#     password='123456',
#     database='test',
#     charset='utf8')
# data = pd.read_sql('select * from all_gzdata', con=con)
# con.close()  # close the connection

# # Save the data that was read
# data.to_csv('../../data/0328/all_gzdata.csv', index=False, encoding='utf-8')

# -*- coding: utf-8 -*-

# Code 11-2

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('mysql+pymysql://root:123456@localhost:3306/test?charset=utf8')
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)
# Analyze page types
counts = [i['fullURLId'].value_counts() for i in sql]  # tally chunk by chunk
counts = pd.concat(counts).groupby(level=0).sum()  # merge the partial tallies (group by index and sum)
counts = counts.reset_index()  # reset the index, moving the old index into a column
counts.columns = ['index', 'num']  # rename the columns; the count column otherwise defaults to 0
counts['type'] = counts['index'].str.extract(r'(\d{3})')  # first three digits are the category id
counts_ = counts[['type', 'num']].groupby('type').sum()  # aggregate by category
counts_.sort_values(by='num', ascending=False, inplace=True)  # sort descending
counts_['ratio'] = counts_.iloc[:, 0] / counts_.iloc[:, 0].sum()
print(counts_)
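
The chunk-then-merge pattern above is worth seeing in isolation: the partial value_counts from each chunk are concatenated, then grouped by index so counts for the same key are summed. A self-contained toy example (hypothetical category ids, no database needed):

# Toy illustration of the chunked tally-and-merge pattern used above.
import pandas as pd

chunk1 = pd.Series(['101003', '107001', '101003'])
chunk2 = pd.Series(['107001', '1999001'])
partial = [c.value_counts() for c in [chunk1, chunk2]]  # one tally per chunk
merged = pd.concat(partial).groupby(level=0).sum()      # same key -> summed
print(merged)  # 101003: 2, 107001: 2, 1999001: 1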

 

# Code 11-3

# Category 107 contains only 107001, but it can be further split into three
# types: knowledge content pages, knowledge list pages, and knowledge index pages
def count107(i):  # custom per-chunk tally
    j = i[['fullURL']][i['fullURLId'].str.contains('107')].copy()  # rows whose category id contains 107
    j['type'] = None  # add an empty column
    # the three patterns overlap, so later assignments override earlier ones
    j.loc[j['fullURL'].str.contains('info/.+?/'), 'type'] = '知识首页'
    j.loc[j['fullURL'].str.contains('info/.+?/.+?'), 'type'] = '知识列表页'
    j.loc[j['fullURL'].str.contains(r'/\d+?_*\d+?\.html'), 'type'] = '知识内容页'
    return j['type'].value_counts()
# Note: the sql iterator is exhausted after one pass, so it must be re-created
# from the database before each new tally (!!!)
#engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/test?charset=utf8')
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)

counts2 = [count107(i) for i in sql]  # tally chunk by chunk
counts2 = pd.concat(counts2).groupby(level=0).sum()  # merge the partial tallies
print(counts2)
# Compute each type's share
res107 = pd.DataFrame(counts2)
# res107.reset_index(inplace=True)
res107.index.name = '107类型'
res107.rename(columns={'type': 'num'}, inplace=True)
res107['比例'] = res107['num'] / res107['num'].sum()
res107.reset_index(inplace=True)
print(res107)
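
Because the three URL patterns overlap (a content URL also matches the index and list patterns), the order of the assignments decides the final label. A small illustration with made-up URLs (hypothetical examples, not taken from the dataset):

# Hypothetical URLs showing why assignment order matters in count107.
import pandas as pd

j = pd.DataFrame({'fullURL': ['http://www.example.com/info/hunyin/',
                              'http://www.example.com/info/hunyin/lihun/',
                              'http://www.example.com/info/hunyin/lihun/201312113.html']})
j['type'] = None
j.loc[j['fullURL'].str.contains('info/.+?/'), 'type'] = '知识首页'        # matches all three rows
j.loc[j['fullURL'].str.contains('info/.+?/.+?'), 'type'] = '知识列表页'   # overrides rows 2 and 3
j.loc[j['fullURL'].str.contains(r'/\d+?_*\d+?\.html'), 'type'] = '知识内容页'  # overrides row 3
print(j)  # index page, list page, content page respectively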

 

# Code 11-4

def countquestion(i):  # custom per-chunk tally
    j = i[['fullURLId']][i['fullURL'].str.contains(r'\?')].copy()  # rows whose URL contains a '?'
    return j

#engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/test?charset=utf8')
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)

counts3 = [countquestion(i)['fullURLId'].value_counts() for i in sql]
counts3 = pd.concat(counts3).groupby(level=0).sum()
print(counts3)

# Compute each type's share and keep the result
df1 = pd.DataFrame(counts3)
df1['perc'] = df1['fullURLId'] / df1['fullURLId'].sum() * 100
df1.sort_values(by='fullURLId', ascending=False, inplace=True)
print(df1.round(4))

 

# Code 11-5

def page199(i):  # custom per-chunk tally
    j = i[['fullURL', 'pageTitle']][(i['fullURLId'].str.contains('199')) &
          (i['fullURL'].str.contains(r'\?'))].copy()
    j['pageTitle'] = j['pageTitle'].fillna('空')  # mark missing titles as '空'
    j['type'] = '其他'  # default category; later assignments override it
    j.loc[j['pageTitle'].str.contains('法律快车-律师助手'), 'type'] = '法律快车-律师助手'
    j.loc[j['pageTitle'].str.contains('咨询发布成功'), 'type'] = '咨询发布成功'
    j.loc[j['pageTitle'].str.contains('免费发布法律咨询'), 'type'] = '免费发布法律咨询'
    j.loc[j['pageTitle'].str.contains('法律快搜'), 'type'] = '快搜'
    j.loc[j['pageTitle'].str.contains('法律快车法律经验'), 'type'] = '法律快车法律经验'
    j.loc[j['pageTitle'].str.contains('法律快车法律咨询'), 'type'] = '法律快车法律咨询'
    j.loc[(j['pageTitle'].str.contains('_法律快车')) |
          (j['pageTitle'].str.contains('-法律快车')), 'type'] = '法律快车'
    j.loc[j['pageTitle'].str.contains('空'), 'type'] = '空'

    return j

# Note: the sql iterator must be re-created from the database before each pass
#engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/test?charset=utf8')
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)  # read the table in chunks
#sql = pd.read_sql_query('select * from all_gzdata limit 10000', con=engine)

counts4 = [page199(i) for i in sql]  # tally chunk by chunk
counts4 = pd.concat(counts4)
d1 = counts4['type'].value_counts()
print(d1)
d2 = counts4[counts4['type'] == '其他']
print(d2)
# Compute each part's share and keep the result
df1_ = pd.DataFrame(d1)
df1_['perc'] = df1_['type'] / df1_['type'].sum() * 100
df1_.sort_values(by='type', ascending=False, inplace=True)
print(df1_)


# Code 11-6

def xiaguang(i):  # custom per-chunk tally: pages whose URL has no .html suffix
    j = i.loc[~i['fullURL'].str.contains(r'\.html'),
              ['fullURL', 'fullURLId', 'pageTitle']]
    return j

# Note: the sql iterator must be re-created from the database before each pass
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/test?charset=utf8')
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)  # read the table in chunks

counts5 = [xiaguang(i) for i in sql]
counts5 = pd.concat(counts5)

xg1 = counts5['fullURLId'].value_counts()
print(xg1)
# Compute each part's share
xg_ = pd.DataFrame(xg1)
xg_.reset_index(inplace=True)
xg_.columns = ['index', 'num']
xg_['perc'] = xg_['num'] / xg_['num'].sum() * 100
xg_.sort_values(by='num', ascending=False, inplace=True)

xg_['type'] = xg_['index'].str.extract(r'(\d{3})')  # first three digits are the category id

xgs_ = xg_[['type', 'num']].groupby('type').sum()  # aggregate by category
xgs_.sort_values(by='num', ascending=False, inplace=True)  # sort descending
xgs_['percentage'] = xgs_['num'] / xgs_['num'].sum() * 100

print(xgs_.round(4))


# Code 11-7

# Analyze page click counts
# Tally clicks per IP
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/test?charset=utf8')
sql = pd.read_sql('all_gzdata', engine, chunksize=10000)  # read the table in chunks

counts1 = [i['realIP'].value_counts() for i in sql]  # per-chunk click counts per IP
counts1 = pd.concat(counts1).groupby(level=0).sum()  # merge the partial tallies (level=0 groups by index)
print(counts1)

counts1_ = pd.DataFrame(counts1)
counts1_.columns = ['realIP']  # name the column explicitly (value_counts naming differs across pandas versions); it holds each IP's click count

counts1_[1] = 1  # helper column of ones, one per IP
hit_count = counts1_.groupby('realIP').sum()  # number of users for each distinct click count
# counts1_['realIP'].value_counts() would give the same result
hit_count.columns = ['用户数']
hit_count.index.name = '点击次数'

# Tally users with 1-7 clicks and with more than 7 clicks
hit_count.sort_index(inplace=True)
hit_count_7 = hit_count.iloc[:7, :]
time = hit_count.iloc[7:, 0].sum()  # number of users with more than 7 clicks
hit_count_7 = pd.concat([hit_count_7, pd.DataFrame([{'用户数': time}])], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
hit_count_7.index = ['1', '2', '3', '4', '5', '6', '7', '7次以上']
hit_count_7['用户比例'] = hit_count_7['用户数'] / hit_count_7['用户数'].sum()
print(hit_count_7)
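
As the comment above notes, the groupby-on-ones trick is equivalent to calling value_counts on the click-count column. A toy check with hypothetical counts:

# Toy check that groupby-on-ones equals value_counts, as noted above.
import pandas as pd

clicks = pd.DataFrame({'realIP': [1, 1, 2, 3, 1]})     # hypothetical click counts per IP
clicks[1] = 1
by_group = clicks.groupby('realIP').sum()[1]           # users per distinct click count
by_vc = clicks['realIP'].value_counts().sort_index()   # same numbers
print(by_group.tolist() == by_vc.tolist())             # True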

 

 

# -*- coding: utf-8 -*-

# Code 11-14

import pandas as pd
# Build the model from the training set
# Note: data_tr (the training records), IP_tr (its distinct IPs) and url_tr
# (its distinct URLs) come from the preprocessing steps (Code 11-8 to 11-13),
# which are not included in this excerpt.
UI_matrix_tr = pd.DataFrame(0, index=IP_tr, columns=url_tr)
# Build the user-item matrix
print('start')
count = 0
for i in data_tr.index:
    count += 1
    if count % 1000 == 0:
        print('loop1', count/1000, 'k')
    UI_matrix_tr.loc[data_tr.loc[i, 'realIP'], data_tr.loc[i, 'fullURL']] = 1
sum(UI_matrix_tr.sum(axis=1))  # total number of ones in the matrix
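
The loop above can be replaced by a single pd.crosstab call, which is far faster on a large training set. A sketch assuming the same data_tr, IP_tr and url_tr:

# Sketch of a vectorized alternative to the loop above (same assumptions).
UI_alt = pd.crosstab(data_tr['realIP'], data_tr['fullURL'])  # visit counts
UI_alt = (UI_alt > 0).astype(int)                            # binarize repeat visits
UI_alt = UI_alt.reindex(index=IP_tr, columns=url_tr, fill_value=0)
# UI_alt should now equal UI_matrix_tr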
# Build the item similarity matrix (computationally heavy; this takes a long time)

Item_matrix_tr = pd.DataFrame(0, index=url_tr, columns=url_tr)
print(Item_matrix_tr.index)
count = 0
for i in Item_matrix_tr.index:
    for j in Item_matrix_tr.index:
        count += 1
        if count % 10000 == 0:
            print('loop2', count/10000, 'w')
        a = sum(UI_matrix_tr.loc[:, [i, j]].sum(axis=1) == 2)  # users who visited both i and j
        b = sum(UI_matrix_tr.loc[:, [i, j]].sum(axis=1) != 0)  # users who visited i or j
        Item_matrix_tr.loc[i, j] = a / b  # Jaccard similarity

# Zero out the diagonal of the similarity matrix
# (an item must not recommend itself)

count = 0
for i in Item_matrix_tr.index:
    count += 1
    if count % 10000 == 0:
        print('loop3', count/10000, 'w')
    Item_matrix_tr.loc[i, i] = 0
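
The double loop computes the Jaccard index |U_i ∩ U_j| / |U_i ∪ U_j| between each pair of item columns. With matrix algebra the whole similarity matrix can be obtained at once; a sketch under the same assumptions (UI_matrix_tr is the binary user-item matrix built above):

# Vectorized Jaccard similarity over the binary user-item matrix (a sketch).
import numpy as np

M = UI_matrix_tr.values                          # users x items, entries 0/1
inter = M.T @ M                                  # co-visits: |U_i ∩ U_j|
col = M.sum(axis=0)                              # visits per item: |U_i|
union = col[:, None] + col[None, :] - inter      # |U_i ∪ U_j|
with np.errstate(divide='ignore', invalid='ignore'):
    sim = np.where(union > 0, inter / union, 0.0)
np.fill_diagonal(sim, 0)                         # no self-recommendation
Item_alt = pd.DataFrame(sim, index=url_tr, columns=url_tr)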

# Evaluate the model on the test set
print(data_te)
IP_te = data_te.iloc[:, 0]
url_te = data_te.iloc[:, 1]
IP_te = list(set(IP_te))
url_te = list(set(url_te))

# User-item matrix for the test set
UI_matrix_te = pd.DataFrame(0, index=IP_te, columns=url_te)

count = 0
for i in data_te.index:
    count += 1
    if count % 10000 == 0:
        print('loop4', count/10000, 'w')
    UI_matrix_te.loc[data_te.loc[i, 'realIP'], data_te.loc[i, 'fullURL']] = 1
# print(UI_matrix_te)
# Generate recommendations for the test-set IPs
# ('NaN' marks rows that could not be scored)
Res = pd.DataFrame('NaN', index=data_te.index,
                   columns=['IP', '已浏览网址', '推荐网址', 'T/F'])
Res.loc[:, 'IP'] = list(data_te.iloc[:, 0])
Res.loc[:, '已浏览网址'] = list(data_te.iloc[:, 1])

print(Res)

# Start recommending: for each visited URL, pick the most similar item
for i in Res.index:
    if Res.loc[i, '已浏览网址'] in list(Item_matrix_tr.index):
        # idxmax returns the column label with the highest similarity
        Res.loc[i, '推荐网址'] = Item_matrix_tr.loc[Res.loc[i, '已浏览网址'], :].idxmax()
        # check whether the recommended URL was actually visited in the test set
        if Res.loc[i, '推荐网址'] in url_te:
            Res.loc[i, 'T/F'] = UI_matrix_te.loc[Res.loc[i, 'IP'], Res.loc[i, '推荐网址']] == 1
        else:
            Res.loc[i, 'T/F'] = False
print(Res)

# Save the recommendation results
Res.to_csv('../../data/0328/Res.csv', index=False, encoding='utf8')

# -*- coding: utf-8 -*-

# Code 11-15

import pandas as pd
# Load the saved recommendation results
Res = pd.read_csv('../../data/0328/Res.csv', keep_default_na=False, encoding='utf8')

# Precision: correct recommendations / recommendations that could be checked
Pre = round(sum(Res.loc[:, 'T/F'] == 'True') / (len(Res.index) - sum(Res.loc[:, 'T/F'] == 'NaN')), 3)

print(Pre)

# Recall: correct recommendations / (correct + rows with no recommendation)
Rec = round(sum(Res.loc[:, 'T/F'] == 'True') / (sum(Res.loc[:, 'T/F'] == 'True') + sum(Res.loc[:, 'T/F'] == 'NaN')), 3)

print(Rec)

# F1 score
F1 = round(2 * Pre * Rec / (Pre + Rec), 3)
print(F1)
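
A quick sanity check of these formulas on a hypothetical results table (three checkable rows, one unrecommendable row):

# Hypothetical toy check of the precision/recall/F1 formulas above.
import pandas as pd

Res = pd.DataFrame({'T/F': ['True', 'False', 'True', 'NaN']})
tp = sum(Res['T/F'] == 'True')      # 2 correct recommendations
nan = sum(Res['T/F'] == 'NaN')      # 1 row without a recommendation
Pre = tp / (len(Res.index) - nan)   # 2/3
Rec = tp / (tp + nan)               # 2/3
F1 = 2 * Pre * Rec / (Pre + Rec)    # 2/3
print(round(Pre, 3), round(Rec, 3), round(F1, 3))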

 

From: https://www.cnblogs.com/quanshi/p/17315340.html
