Source code: 自然语言处理练习 — code written while learning natural language processing (gitee.com)
Data source: Mercari Price Suggestion Challenge | Kaggle
If you cannot access Kaggle directly, the dataset is also available on Baidu Cloud:
链接:https://pan.baidu.com/s/1EM2MwjX4bLlypLSIJYZqeg?pwd=xqs0
Extraction code: xqs0
1. Loading the Dataset
After loading the dataset, we first print some basic information to get a feel for the data.
import pandas as pd

# Configure pandas display options so wide frames print fully
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)

train = pd.read_csv('data/train.tsv', sep='\t')
test = pd.read_csv('data/test.tsv', sep='\t')

# Dataset sizes
print(train.shape)
print(test.shape)

# Column names and types
print(train.dtypes)

# First few rows
print(train.head())

# Summary statistics of the price column
print(train.price.describe())
2. Analyzing the Factors That Affect Price
Step 1 gave us a rough picture of the dataset and of the range and mean of prices; next we analyze the factors that influence price.
2.1 Log-transforming the price and comparing the distributions
Because the raw price distribution is heavily skewed, we apply a log transform, log(price + 1), to it.
import numpy as np
import matplotlib.pyplot as plt

# Log-transform the price and compare the distributions before and after
plt.subplot(1, 2, 1)
(train['price']).plot.hist(bins=50, figsize=(20, 10), edgecolor='white', range=[0, 250])
plt.xlabel('price', fontsize=17)
plt.ylabel('frequency', fontsize=17)
plt.tick_params(labelsize=15)
plt.title('Price Distribution - Training Set', fontsize=17)

plt.subplot(1, 2, 2)
np.log(train['price'] + 1).plot.hist(bins=50, figsize=(20, 10), edgecolor='white')
plt.xlabel('log(price+1)', fontsize=17)
plt.ylabel('frequency', fontsize=17)
plt.tick_params(labelsize=15)
plt.title('Log(Price) Distribution - Training Set', fontsize=17)
plt.show()
The raw prices are clearly imbalanced: the high-price range is wide but sparsely populated. After the log transform, the distribution is much closer to normal.
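A quick numeric check of this claim (a sketch assuming scipy is installed; skewness closer to 0 means a more symmetric distribution):

from scipy.stats import skew

# Skewness of the raw prices vs. the log-transformed prices
print(skew(train['price']))            # strongly right-skewed
print(skew(np.log1p(train['price'])))  # np.log1p(x) == np.log(x + 1); much closer to 0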
2.2 The effect of free shipping on price
Next we analyze whether who pays for shipping affects the price.
# Who pays shipping: per the Kaggle data dictionary, shipping == 1 means the
# seller pays (free shipping for the buyer), and 0 means the buyer pays
print(train.shipping.value_counts() / len(train))
We can see that for roughly 55% of listings the buyer pays shipping; the remaining ~45% ship free at the seller's expense.
# Compare price distributions by who pays shipping
prc_shipBySeller = train.loc[train.shipping == 1, 'price']  # seller pays (free shipping)
prc_shipByBuyer = train.loc[train.shipping == 0, 'price']   # buyer pays

fig, ax = plt.subplots(figsize=(20, 10))
ax.hist(np.log(prc_shipBySeller + 1), color='#8CB4E1', alpha=1.0, bins=50,
        label='Price when Seller pays Shipping')
ax.hist(np.log(prc_shipByBuyer + 1), color='#007D00', alpha=0.7, bins=50,
        label='Price when Buyer pays Shipping')
ax.set(title='Histogram Comparison', ylabel='% of Dataset in Bin')
plt.legend()
plt.xlabel('log(price+1)', fontsize=17)
plt.ylabel('frequency', fontsize=17)
plt.title('Price Distribution by Shipping Type', fontsize=17)
plt.tick_params(labelsize=15)
plt.show()
We can see that items where the buyer pays shipping tend to be somewhat more expensive overall than free-shipping items; sellers appear to offer free shipping mostly on cheaper goods.
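To quantify this, we can compare the average price in each group (a quick check using the frames already loaded):

# Mean price by shipping group (1 = seller pays, 0 = buyer pays)
print(train.groupby('shipping')['price'].mean())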
2.3 The effect of category on price
First, count the item categories.
# Item categories
print('There are %d unique values in the category column' % train['category_name'].nunique())
print(train['category_name'].value_counts()[:5])
print('There are %d items that do not have a label' % train['category_name'].isnull().sum())
There are 1,287 unique category values; the five most common are printed above. 6,327 items have no category label; the code below assigns them a "No Label" placeholder.
Since there are far too many categories, and each category_name has the form main/sub1/sub2, we split it into three levels to coarsen the grouping.
# Too many categories: split category_name into three levels
def split_cat(text):
    try:
        return text.split("/")
    except AttributeError:  # missing values (NaN) have no .split
        return "No Label", "No Label", "No Label"

train['general_cat'], train['subcat_1'], train['subcat_2'] = \
    zip(*train['category_name'].apply(split_cat))
print(train.head())

test['general_cat'], test['subcat_1'], test['subcat_2'] = \
    zip(*test['category_name'].apply(split_cat))

print('There are %d unique general_cat' % train['general_cat'].nunique())
print('There are %d unique first sub-categories' % train['subcat_1'].nunique())
print('There are %d unique second sub-categories' % train['subcat_2'].nunique())
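For example, split_cat('Men/Tops/T-shirts') returns ['Men', 'Tops', 'T-shirts'], while a missing value (NaN) triggers the except branch and becomes ('No Label', 'No Label', 'No Label'). Note that zip(*...) truncates to three levels for the handful of categories with deeper nesting.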
There are 11 main categories, 114 first-level subcategories, and 871 second-level subcategories.
Next, look at the distribution of the main categories.
import plotly.graph_objs as go
import plotly.offline as py

# Distribution of the main categories
x = train['general_cat'].value_counts().index.values.astype('str')
y = train['general_cat'].value_counts().values
pct = [('%.2f' % (v * 100)) + '%' for v in (y / len(train))]
trace1 = go.Bar(x=x, y=y, text=pct)
layout = dict(title="Number of Items by Main Category",
              yaxis=dict(title='Count'), xaxis=dict(title='Category'))
fig = dict(data=[trace1], layout=layout)
py.iplot(fig)
The Women category dominates, accounting for about 45% of all items; Beauty is second and Kids third.
There are many subcategories, so we show the distribution of the top 15.
# Distribution of the top 15 first-level subcategories
x = train['subcat_1'].value_counts().index.values.astype('str')[:15]
y = train['subcat_1'].value_counts().values[:15]
pct = [('%.2f' % (v * 100)) + '%' for v in (y / len(train))][:15]
trace1 = go.Bar(x=x, y=y, text=pct,
                marker=dict(color=y, colorscale='Portland', showscale=True, reversescale=False))
layout = dict(title="Number of Items by Sub Category (Top 15)",
              yaxis=dict(title='Count'), xaxis=dict(title='SubCategory'))
fig = dict(data=[trace1], layout=layout)
py.iplot(fig)
Next, look at the price range within each main category.
# Price range per main category (horizontal box plots of log price)
general_cats = train['general_cat'].unique()
x = [train.loc[train['general_cat'] == cat, 'price'] for cat in general_cats]
data = [go.Box(x=np.log(x[i] + 1), name=general_cats[i]) for i in range(len(general_cats))]
layout = dict(title='Price Distribution by General Category',
              yaxis=dict(title='Category'), xaxis=dict(title='log(price+1)'))
fig = dict(data=data, layout=layout)
py.iplot(fig)
2.4 Brand distribution
Now analyze the distribution of brands.
# Distribution of the top 10 brand names
x = train['brand_name'].value_counts().index.values.astype('str')[:10]
y = train['brand_name'].value_counts().values[:10]
trace1 = go.Bar(x=x, y=y,
                marker=dict(color=y, colorscale='Portland', showscale=True, reversescale=False))
layout = dict(title="Top 10 Brands by Number of Items",
              yaxis=dict(title='Count'), xaxis=dict(title='Brand Name'))
fig = dict(data=[trace1], layout=layout)
py.iplot(fig)
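Note that brand_name is missing for a large share of listings, so it is worth checking the gap before leaning on this feature (a quick check):

# How many items have no brand name at all
print(train['brand_name'].isnull().sum())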
2.5 The effect of description length on price
We count the length of each item description and then study how it relates to price.
import re
import string
from spacy.lang.en import stop_words  # spaCy's built-in English stop-word list

# Effect of description length on price
def wordCount(text):
    try:
        text = text.lower()
        regex = re.compile('[' + re.escape(string.punctuation) + '0-9\\r\\t\\n]')
        txt = regex.sub(' ', text)
        words = [w for w in txt.split(" ")
                 if w not in stop_words.STOP_WORDS and len(w) > 3]
        return len(words)
    except:
        return 0

train['desc_len'] = train['item_description'].apply(wordCount)
test['desc_len'] = test['item_description'].apply(wordCount)
print(train.head())

df = train.groupby('desc_len')['price'].mean().reset_index()
trace1 = go.Scatter(x=df['desc_len'], y=np.log(df['price'] + 1),
                    mode='lines+markers', name='lines+markers')
layout = dict(title='Average Log(Price) by Description Length',
              yaxis=dict(title='Average Log(Price)'), xaxis=dict(title='Description Length'))
fig = dict(data=[trace1], layout=layout)
py.iplot(fig)
Prices peak for descriptions of moderate length. Very short descriptions may belong to simple, cheap items, while very long ones may belong to niche items; both tend to sell for less.
3. Keywords in Item Descriptions
3.1 Counting common keywords
We count the most common keywords in the item descriptions. Note that some items have no description, so those rows must be dropped first.
import nltk
from collections import Counter

print(train.item_description.isnull().sum())
# Drop rows with a missing description
train = train[pd.notnull(train['item_description'])]

# Sentence-tokenize the descriptions of each main category
tokenize = nltk.data.load('tokenizers/punkt/english.pickle')
cat_desc = dict()
for cat in general_cats:
    text = ' '.join(train.loc[train['general_cat'] == cat, 'item_description'].values)
    cat_desc[cat] = tokenize.tokenize(text)

# Count the most common tokens overall
flat_lst = [item for sublist in list(cat_desc.values()) for item in sublist]
allWordsCount = Counter(flat_lst)
all_top20 = allWordsCount.most_common(20)
x = [w[0] for w in all_top20]
y = [w[1] for w in all_top20]
trace1 = go.Bar(x=x, y=y)
layout = dict(title='Word Frequency',
              yaxis=dict(title='Count'), xaxis=dict(title='Word'))
fig = dict(data=[trace1], layout=layout)
py.iplot(fig)
3.2 Keywords per category
First, tokenize the descriptions and remove stop words.
from nltk.corpus import stopwords
from nltk import word_tokenize, sent_tokenize

# Keywords per category: a word-level tokenizer (note this rebinds the name
# `tokenize`, which previously held the punkt sentence tokenizer)
stop = set(stopwords.words('english'))

def tokenize(text):
    try:
        regex = re.compile('[' + re.escape(string.punctuation) + '0-9\\r\\t\\n]')
        text = regex.sub(' ', text)
        tokens_ = [word_tokenize(s) for s in sent_tokenize(text)]
        tokens = []
        for token_by_sent in tokens_:
            tokens += token_by_sent
        tokens = list(filter(lambda t: t.lower() not in stop, tokens))
        filtered_tokens = [w for w in tokens if re.search('[a-zA-Z]', w)]
        filtered_tokens = [w.lower() for w in filtered_tokens if len(w) >= 3]
        return filtered_tokens
    except TypeError as e:
        print(text, e)

train['tokens'] = train['item_description'].map(tokenize)
test['tokens'] = test['item_description'].map(tokenize)
train.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)

for description, tokens in zip(train['item_description'].head(), train['tokens'].head()):
    print('description:', description)
    print('tokens:', tokens)
    print()
from wordcloud import WordCloud

# Most common keywords per category, rendered as word clouds
cat_desc = dict()
for cat in general_cats:
    text = ' '.join(train.loc[train['general_cat'] == cat, 'item_description'].values)
    cat_desc[cat] = tokenize(text)

cat_desc100 = dict()
for key, value in cat_desc.items():
    cat_desc100[key] = Counter(value).most_common(100)

def generate_wordcloud(tup):
    wordcloud = WordCloud(background_color='white', max_words=50,
                          max_font_size=40, random_state=42).generate(str(tup))
    return wordcloud

fig, axes = plt.subplots(len(cat_desc100) // 2 + 1, 2, figsize=(30, 15))
for i, (key, cat) in enumerate(cat_desc100.items()):
    ax = axes[i // 2, i % 2]
    ax.imshow(generate_wordcloud(cat), interpolation='bilinear')
    ax.axis('off')
    ax.set_title("%s Top 100" % key, fontsize=12)
plt.show()
For each category we take the 100 most frequent keywords and render their frequencies as a word cloud.
4. TF-IDF
Many of the keywords above are shared across categories, so they do not characterize any one category. We therefore use the TF-IDF algorithm for keyword mining. Its basic idea: the more often a term appears in the current document, and the fewer other documents it appears in, the more likely it is a keyword of that document. We first expand the descriptions into up to 180,000 features and then compute TF-IDF scores.
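Concretely, with scikit-learn's TfidfVectorizer (used below, with its default smooth_idf=True), the score of term t in document d is

tfidf(t, d) = tf(t, d) * idf(t),  where  idf(t) = ln((1 + n) / (1 + df(t))) + 1

Here tf(t, d) is the count of t in d, n is the number of documents, and df(t) is the number of documents containing t; each document's vector is then L2-normalized. Terms that appear everywhere get an idf near 1, while rare terms get larger weights.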
from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF over train + test descriptions
vectorizer = TfidfVectorizer(min_df=10, max_features=180000,
                             tokenizer=tokenize, ngram_range=(1, 2))
all_desc = np.append(train['item_description'].values, test['item_description'].values)
vz = vectorizer.fit_transform(list(all_desc))
print(vz.shape)

# Map each feature to its idf weight and inspect both extremes
tfidf = dict(zip(vectorizer.get_feature_names_out(), vectorizer.idf_))
tfidf = pd.DataFrame.from_dict(tfidf, orient='index', columns=['tfidf'])
print(tfidf.sort_values(by=['tfidf'], ascending=True).head(10))
print(tfidf.sort_values(by=['tfidf'], ascending=False).head(10))
As expected, stop-word-like terms score lowest: they are frequent but carry little distinguishing value. The highest-scoring terms are rare and specific and can serve as keywords for analysis.
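As a small sketch (reusing vz and vectorizer from the block above), here is how to inspect the highest-scoring terms of a single description:

# Top TF-IDF terms for the first description
row = vz[0].toarray().ravel()
vocab = vectorizer.get_feature_names_out()
for idx in np.argsort(row)[::-1][:10]:
    if row[idx] > 0:
        print(vocab[idx], round(row[idx], 3))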
Next we use truncated SVD to reduce the TF-IDF features to 30 dimensions, and then t-SNE to project them down to 2 dimensions for visualization.
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
import bokeh.plotting as bp
from bokeh.plotting import show
from bokeh.models import HoverTool, ColumnDataSource

trn = train.copy()
tst = test.copy()
trn['is_train'] = 1
tst['is_train'] = 0

# Sample 15,000 rows from train + test to keep t-SNE tractable
sample_sz = 15000
combined_df = pd.concat([trn, tst])
combined_sample = combined_df.sample(n=sample_sz)
vz_sample = vectorizer.fit_transform(list(combined_sample['item_description']))

# SVD: reduce the TF-IDF features to 30 dimensions
n_comp = 30
svd = TruncatedSVD(n_components=n_comp, random_state=42)
svd_tfidf = svd.fit_transform(vz_sample)

# t-SNE: project down to 2 dimensions
tsne_model = TSNE(n_components=2, verbose=1, random_state=42, n_iter=500)
tsne_tfidf = tsne_model.fit_transform(svd_tfidf)

plot_tfidf = bp.figure(width=700, height=600,
                       title='tf-idf clustering of the item description',
                       tools='pan, wheel_zoom, box_zoom, reset, hover',
                       x_axis_type=None, y_axis_type=None, min_border=1)
combined_sample.reset_index(inplace=True, drop=True)
tfidf_df = pd.DataFrame(tsne_tfidf, columns=['x', 'y'])
tfidf_df['description'] = combined_sample['item_description']
tfidf_df['tokens'] = combined_sample['tokens']
tfidf_df['category'] = combined_sample['general_cat']

plot_tfidf.scatter(x='x', y='y', source=tfidf_df, alpha=0.7)
hover = plot_tfidf.select(dict(type=HoverTool))
hover.tooltips = {'description': '@description', 'tokens': '@tokens', 'category': '@category'}
show(plot_tfidf)
Descriptions with similar keywords end up plotted close to one another.
5. Clustering and Topic Modeling
5.1 K-means clustering of the points above
from sklearn.cluster import MiniBatchKMeans

# K-means clustering on the TF-IDF sample
num_clusters = 10
kmeans_model = MiniBatchKMeans(n_clusters=num_clusters, init='k-means++', n_init=1,
                               init_size=10000, batch_size=1000, verbose=0, max_iter=1000)
kmeans_model.fit(vz_sample)
kmeans_clusters = kmeans_model.predict(vz_sample)
kmeans_distances = kmeans_model.transform(vz_sample)

# Project the cluster-distance features to 2D for plotting
tsne_kmeans = tsne_model.fit_transform(kmeans_distances)
kmeans_df = pd.DataFrame(tsne_kmeans, columns=['x', 'y'])
kmeans_df['cluster'] = kmeans_clusters
kmeans_df['description'] = combined_sample['item_description']
kmeans_df['category'] = combined_sample['general_cat']

plot_kmeans = bp.figure(width=700, height=600,
                        title='KMeans clustering of the description',
                        tools='pan, wheel_zoom, box_zoom, reset, hover',
                        x_axis_type=None, y_axis_type=None, min_border=1)
print(kmeans_clusters)

# One color per cluster id
colormap = {0: 'red', 1: 'green', 2: 'blue', 3: 'black', 4: 'yellow',
            5: 'pink', 6: 'purple', 7: 'grey', 8: 'brown', 9: 'orange'}
color = pd.Series(kmeans_clusters).map(colormap)

source = ColumnDataSource(data=dict(x=kmeans_df['x'], y=kmeans_df['y'], color=color,
                                    description=kmeans_df['description'],
                                    category=kmeans_df['category'],
                                    cluster=kmeans_df['cluster']))
plot_kmeans.scatter(x='x', y='y', color='color', source=source)
hover = plot_kmeans.select(dict(type=HoverTool))
hover.tooltips = {'description': '@description', 'category': '@category', 'cluster': '@cluster'}
show(plot_kmeans)
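The choice of num_clusters = 10 here is arbitrary. One quick sanity check (a sketch reusing vz_sample from above) is to watch how the K-means inertia falls as k grows and look for diminishing returns:

# Inertia "elbow" check: inertia always decreases with k,
# so look for the point where the improvement levels off
for k in (5, 10, 15, 20):
    km = MiniBatchKMeans(n_clusters=k, init='k-means++', n_init=1,
                         random_state=42).fit(vz_sample)
    print(k, km.inertia_)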
5.2 LDA topic modeling
Besides clustering, we can also group the descriptions with an LDA (Latent Dirichlet Allocation) topic model.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

# LDA topic modeling (LDA works on raw term counts, not TF-IDF)
cvectorizer = CountVectorizer(min_df=4, max_features=180000,
                              tokenizer=tokenize, ngram_range=(1, 2))
cvz = cvectorizer.fit_transform(combined_sample['item_description'])

lda_model = LatentDirichletAllocation(n_components=10, learning_method='online',
                                      max_iter=20, random_state=42)
X_topics = lda_model.fit_transform(cvz)

# Print the top 10 words of each topic
n_top_words = 10
topic_summaries = []
topic_word = lda_model.components_
vocab = cvectorizer.get_feature_names_out()
for i, topic_dist in enumerate(topic_word):
    topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words + 1):-1]
    topic_summaries.append(' '.join(topic_words))
    print('Topic {}:{}'.format(i, '|'.join(topic_words)))

tsne_lda = tsne_model.fit_transform(X_topics)

# Normalize each row to a topic distribution and take the dominant topic
doc_topic = X_topics / X_topics.sum(axis=1, keepdims=True)
lda_keys = doc_topic.argmax(axis=1)

lda_df = pd.DataFrame(tsne_lda, columns=['x', 'y'])
lda_df['description'] = combined_sample['item_description']
lda_df['category'] = combined_sample['general_cat']
lda_df['topic'] = lda_keys
lda_df['topic'] = lda_df['topic'].map(int)

plot_lda = bp.figure(width=700, height=600, title='LDA topic visualization',
                     tools='pan, wheel_zoom, box_zoom, reset, hover',
                     x_axis_type=None, y_axis_type=None, min_border=1)

# Color by LDA topic (the original reused the K-means cluster colors here)
color = lda_df['topic'].map(colormap)
source = ColumnDataSource(data=dict(x=lda_df['x'], y=lda_df['y'], color=color,
                                    description=lda_df['description'],
                                    topic=lda_df['topic'],
                                    category=lda_df['category']))
plot_lda.scatter(x='x', y='y', color='color', source=source)
hover = plot_lda.select(dict(type=HoverTool))
hover.tooltips = {'description': '@description', 'topic': '@topic', 'category': '@category'}
show(plot_lda)
The descriptions are thus grouped into ten topics according to their keywords.