import jieba

# Read the text file
path = "all.txt"
with open(path, "r", encoding="utf-8") as file:
    text = file.read()

# Segment the text with jieba
words = jieba.lcut(text)

# Count word frequencies
counts = {}
for word in words:
    if len(word) == 1:  # skip single-character tokens
        continue
    counts[word] = counts.get(word, 0) + 1  # update this word's count in the dict

# Sort the (word, count) pairs by frequency, descending
items = list(counts.items())
items.sort(key=lambda x: x[1], reverse=True)
# Print the 20 most frequent words (slicing avoids an IndexError if fewer than 20 remain)
for word, count in items[:20]:
    print(f"{word:<10}{count:>5}")
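The same word-frequency count can also be written more compactly with collections.Counter from the standard library; the following is a minimal sketch, assuming the same all.txt input file and the same rule of dropping single-character tokens.

import jieba
from collections import Counter

with open("all.txt", "r", encoding="utf-8") as f:
    words = jieba.lcut(f.read())

# Counter builds the frequency dict; most_common(20) replaces the manual sort
counts = Counter(w for w in words if len(w) > 1)
for word, count in counts.most_common(20):
    print(f"{word:<10}{count:>5}")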
Student ID: 2022310143040
Class: 22信计1班
Name: 曾翠