```python
import logging

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Candidate question templates to match against
templates = [
    "分析一下攻击队QAX的攻击行为",
    "分析一下防守单位QAX的防守情况",
    "分析一下目标资产1.1.1.1相关的攻击行为",
    "攻击队QAX在防守单位QAX1上得了多少分",
    "防守单位QAX1在x类威胁上累计扣分多少分",
    "靶标系统有哪些",
    "1.1.1.1是否是靶标系统",
    "攻击IP1.1.1.1属于哪个攻击队",
]

# Split a sentence into a keyword list with jieba word segmentation
def tokenize(text):
    return list(jieba.cut(text))

# Match the incoming question (taken from an args dict, e.g. a request payload)
# to the most similar template
def match_template(args):
    question = args.get("question", "")
    logging.info(f"== input question ==: {question}")
    # Vectorize the question together with the templates using TF-IDF
    vectorizer = TfidfVectorizer(tokenizer=tokenize, token_pattern=None)
    vectors = vectorizer.fit_transform([question] + templates)
    # Cosine similarity between the question and each template
    similarities = cosine_similarity(vectors[0], vectors[1:]).flatten()
    # Pick the template with the highest similarity
    most_similar_index = similarities.argmax()
    key = templates[most_similar_index]
    return key
```
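The snippet returns the best-matching template for an incoming question. A minimal usage sketch, assuming the code is wrapped in a `match_template(args)` function as shown above (the `args` dict and the example question below are illustrative, not from the original post):

```python
if __name__ == "__main__":
    # Hypothetical payload; in practice "question" comes from the caller
    args = {"question": "攻击队QAX一共得了多少分"}
    matched = match_template(args)
    # Likely returns the score-related template,
    # e.g. "攻击队QAX在防守单位QAX1上得了多少分"
    print(matched)
```

Note that the vectorizer is re-fit on every call over the question plus all templates; with only a handful of templates this is cheap, and it guarantees the question's tokens are always part of the TF-IDF vocabulary.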