import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.probability import FreqDist
# Download required NLTK data (each call is a no-op if already present).
nltk.download('stopwords')
# BUG FIX: word_tokenize() requires the 'punkt' tokenizer models; without this
# download the script raises LookupError on a fresh NLTK installation.
nltk.download('punkt')

df = pd.read_csv("result_table02.csv", encoding='utf-8')
# Clean the project-description column: replace NaN with empty strings so that
# downstream tokenization always receives a valid str.
df['项目简介'] = df['项目简介'].fillna('')
# Extract keywords from each project description.
def extract_keywords(text, top_n=3):
    """Extract the most frequent stemmed keywords from *text*.

    Parameters
    ----------
    text : str
        Free-form English project description (may be empty).
    top_n : int, optional
        Number of top keywords to return (default 3, matching the
        original behavior).

    Returns
    -------
    list[tuple[str, int]]
        (stem, frequency) pairs, most frequent first.
    """
    tokens = word_tokenize(text)  # tokenize into words/punctuation
    # Hoist the stopword list into a set once: the original called
    # stopwords.words('english') inside the comprehension, re-loading the
    # corpus and doing an O(n) list scan for every single token.
    stop_words = set(stopwords.words('english'))
    tokens = [w for w in tokens if w.isalpha()]  # keep alphabetic tokens only
    tokens = [w for w in tokens if w.lower() not in stop_words]  # drop stopwords
    stemmer = PorterStemmer()
    stems = [stemmer.stem(w) for w in tokens]  # reduce words to stems
    fdist = FreqDist(stems)
    return fdist.most_common(top_n)
# Apply the keyword-extraction function to every project description.
df['关键字'] = df['项目简介'].apply(extract_keywords)
# Flatten each list of (word, freq) pairs into a space-separated string of words.
df['关键字'] = df['关键字'].apply(lambda x: ' '.join([word for word, freq in x]))
# Write the enriched table (with the new keyword column) back to disk.
df.to_csv("result_table03.csv", index=False)
print("补充关键词成功")
# Tags: word,关键字,补充,关键词,tokens,df,import,数据,nltk
# Source: https://www.cnblogs.com/lin513/p/18095624