Text similarity analysis with TF-IDF and LSI
This post computes the similarity between documents. The standard corpus is what we train the models on; the user corpus is then scored for similarity against the standard corpus.
1. Data preprocessing (see the comments in the code)
The standard corpus is processed as follows:
import re
import jieba
from zhon.hanzi import punctuation

standard_data = []   # tokenized standard corpus
map_value = {}       # row index -> cleaned original sentence
seed = 0
times = 0
ws = open('d:/sentence.csv', 'r', encoding='gbk')
for i in ws.readlines():
    times += 1
    if times == 1:   # skip the header row
        continue
    newline = i.strip().split(',')
    # remove letters, digits and ASCII punctuation
    newline = re.sub("[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%\-\_]", "", newline[0])
    newline = re.sub(' ', '', newline)
    # remove Chinese punctuation
    newline = re.sub("[%s]+" % punctuation, "", newline)
    standard_data.append(list(jieba.cut(newline)))   # segment with jieba
    seed += 1
    map_value[seed - 1] = newline
ws.close()
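A quick way to sanity-check what was built (a minimal sketch; what entry 0 actually contains depends on your sentence.csv):
print(len(standard_data), 'standard sentences loaded')
print(standard_data[0])   # token list produced by jieba for the first sentence
print(map_value[0])       # cleaned original text of the first sentence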
2. TF-IDF
from gensim import corpora, models, similarities

# Build the dictionary and vectorize the corpus
dictionary = corpora.Dictionary(standard_data)
# doc2bow turns each document into a sparse bag-of-words vector
corpus = [dictionary.doc2bow(text) for text in standard_data]
# corpus is an iterable of bow vectors; TfidfModel computes the IDF statistics
# for every feature that appears in it
tfidf_model = models.TfidfModel(corpus)
corpus_tfidf = tfidf_model[corpus]
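To see what the model produces, the tf-idf vector of a single document can be inspected like this (a minimal sketch; rounding to three decimals is only for readability):
doc_tfidf = tfidf_model[corpus[0]]   # tf-idf vector of the first standard sentence
for term_id, weight in doc_tfidf:
    print(dictionary[term_id], round(weight, 3))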
#### Computing document similarity
import re
import jieba

map_value_user = {}   # index -> original user sentence
raw_data = []         # tokenized user corpus
start = 0
w = open('d:/user_content_v2.txt', 'r', encoding='utf-8')
for line in w.readlines():
    newline = line.strip()
    newline = re.sub(' ', '', newline)
    newline2 = list(jieba.cut(newline))
    map_value_user[start] = newline
    raw_data.append(newline2)
    start += 1
w.close()
index = similarities.MatrixSimilarity(corpus_tfidf)
vec_bow = [dictionary.doc2bow(text) for text in raw_data]   # convert the user corpus to bag-of-words
all_result_sims = []
times_v2 = 0
### compute the similarity of every user sentence against the standard corpus
for bow in vec_bow:
    # applying the tf-idf model trained above gives the tf-idf vector of the user sentence
    sims = index[tfidf_model[bow]]
    sims = sorted(enumerate(sims), key=lambda item: -item[1])
    result_sims = []
    for i, j in sims:
        result_sims.append([map_value_user[times_v2], map_value[i], j])
    times_v2 += 1
    all_result_sims.append(result_sims[:20])   # keep the 20 most similar standard sentences
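To check the output, the top matches of the first user sentence can be printed like this (a minimal sketch over the all_result_sims structure built above):
for user_text, standard_text, score in all_result_sims[0][:3]:
    print(user_text, '<->', standard_text, float(score))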
3. LSI
# Train an LSI model on top of the tf-idf corpus and project the standard corpus into the LSI space
lsi = models.LsiModel(corpus_tfidf)
corpus_lsi = lsi[corpus_tfidf]
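The latent topics can be inspected with print_topics (a minimal sketch; since LsiModel above was built without id2word, the topics may be shown with numeric term ids rather than tokens, and passing id2word=dictionary when constructing the model makes them readable):
for topic in lsi.print_topics(num_topics=5, num_words=5):
    print(topic)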
#### Computing document similarity
# The user corpus is loaded exactly as in step 2; if raw_data and map_value_user
# are still in memory, this block can be skipped.
import re
import jieba

map_value_user = {}   # index -> original user sentence
raw_data = []         # tokenized user corpus
start = 0
w = open('d:/user_content_v2.txt', 'r', encoding='utf-8')
for line in w.readlines():
    newline = line.strip()
    newline = re.sub(' ', '', newline)
    newline2 = list(jieba.cut(newline))
    map_value_user[start] = newline
    raw_data.append(newline2)
    start += 1
w.close()
index = similarities.MatrixSimilarity(corpus_lsi)
vec_bow = [dictionary.doc2bow(text) for text in raw_data]   # convert the user corpus to bag-of-words
all_result_sims = []
times_v2 = 0
for bow in vec_bow:
    # chain the tf-idf and LSI transforms to project the user sentence into the LSI space
    sims = index[lsi[tfidf_model[bow]]]
    sims = sorted(enumerate(sims), key=lambda item: -item[1])
    result_sims = []
    for i, j in sims:
        result_sims.append([map_value_user[times_v2], map_value[i], j])
    times_v2 += 1
    all_result_sims.append(result_sims[:20])   # keep the 20 most similar standard sentences
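Finally, the results can be written out for later review (a minimal sketch; the output path is an assumption, not part of the original workflow):
import csv
with open('d:/lsi_similarity_result.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['user_sentence', 'standard_sentence', 'similarity'])
    for result in all_result_sims:
        for user_text, standard_text, score in result:
            writer.writerow([user_text, standard_text, float(score)])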