Train word vectors with fastText and use cosine similarity to measure the similarity of short texts

# -*- coding: utf-8 -*-
import os

import fasttext
import jieba
import numpy as np
import tqdm
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

base_path = os.path.dirname(os.path.abspath(__file__))
# A SQLite database is used here, accessed through SQLAlchemy.
# The table below holds chat data; only the `sentence` column (the chat text) is used.
database_path = os.path.dirname(base_path)
database_dir = os.path.join(database_path, "datas", "data.db")

# Load custom jieba dictionaries for word segmentation
jieba.load_userdict(os.path.join(base_path, "lcut.txt"))
jieba.load_userdict(os.path.join(base_path, "500000-dict.txt"))

def get_data():
    """All get_data has to do is produce a txt file of all the chat content
    after word segmentation, with words separated by spaces."""
    engine = create_engine('sqlite:///{}'.format(database_dir))

    Session = sessionmaker(bind=engine)

    session = Session()

    # Wrap the raw SQL in text() so it also works on newer SQLAlchemy versions
    r = session.execute(text("select sentence from chat where role=0 limit 1000000"))

    sentence_objs = r.fetchall()

    # Drop noisy rows: auto messages, links, system prompts ("系统提示"), nudges ("戳")
    pass_words = ["message", "http", "系统提示", "戳"]

    with open("finance_news_cut.txt", "w", encoding='utf-8') as f:
        for sentence_obj in tqdm.tqdm(sentence_objs):
            sentence = sentence_obj[0]  # type: str

            if any(i in sentence for i in pass_words):
                continue

            # Skip rows that are purely letters/digits (no Chinese text to learn from)
            if sentence.isalnum():
                continue

            seg_sentence = jieba.cut(sentence.replace("\t", " ").replace("\n", " "))

            # One segmented sentence per line, words separated by spaces
            outline = " ".join(seg_sentence)
            outline = outline + "\n"

            f.write(outline)
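
The SQLite query is incidental; as the docstring says, anything that ends up producing that space-separated, segmented txt will do. For example, if the chat logs lived in a plain text file (one sentence per line; the filename below is hypothetical), the preprocessing could be sketched as:

def get_data_from_txt(input_path="chat_sentences.txt"):
    """Sketch: build finance_news_cut.txt from a plain text file instead of SQLite."""
    with open(input_path, encoding="utf-8") as src, \
            open("finance_news_cut.txt", "w", encoding="utf-8") as dst:
        for line in src:
            sentence = line.strip()
            if not sentence or sentence.isalnum():
                continue
            dst.write(" ".join(jieba.cut(sentence)) + "\n")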


def train_model():
    """Train the word-vector model on the segmented corpus and save it."""
    model = fasttext.train_unsupervised('finance_news_cut.txt')
    model.save_model("news_fasttext.model.bin")
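
train_unsupervised defaults to a skipgram model with 100-dimensional vectors. If you want to set the main hyperparameters explicitly, a sketch looks like this (the values below are illustrative, not tuned for this corpus):

def train_model_with_params():
    """Same as train_model, but with the main hyperparameters spelled out."""
    model = fasttext.train_unsupervised(
        'finance_news_cut.txt',
        model='skipgram',  # or 'cbow'
        dim=100,           # dimensionality of the word vectors
        epoch=5,           # passes over the corpus
        minCount=1,        # keep even rare words; chat sentences are short and sparse
    )
    model.save_model("news_fasttext.model.bin")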
    

# Cache the loaded model so the .bin file is not re-read on every call
_model = None


def _load_model():
    global _model
    if _model is None:
        _model = fasttext.load_model('news_fasttext.model.bin')
    return _model


def get_word_vector(word):
    """Get the vector of a single word."""
    model = _load_model()

    word_vector = model.get_word_vector(word)

    return word_vector


def get_sentence_vector(sentence):
    """Get a sentence vector: the average of the vectors of its words."""
    cut_words = jieba.lcut(sentence)

    sentence_vector = None

    for word in cut_words:
        word_vector = get_word_vector(word)

        if sentence_vector is not None:
            sentence_vector += word_vector
        else:
            sentence_vector = word_vector

    # Average over the number of words
    sentence_vector = sentence_vector / len(cut_words)

    return sentence_vector
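
fastText also ships a built-in model.get_sentence_vector, which builds the sentence vector itself (roughly an average of the normalized word vectors). It splits on whitespace, so for Chinese you still need to segment first; an alternative sketch:

def get_sentence_vector_builtin(sentence):
    """Alternative: let fastText compute the sentence vector itself."""
    model = _load_model()
    return model.get_sentence_vector(" ".join(jieba.lcut(sentence)))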


def cos_sim(vector_a, vector_b):
    """
    Compute the cosine similarity between two vectors and rescale it
    from [-1, 1] to [0, 1] via 0.5 + 0.5 * cos.
    :param vector_a: vector a
    :param vector_b: vector b
    :return: sim
    """
    vector_a = np.asarray(vector_a, dtype=np.float64)
    vector_b = np.asarray(vector_b, dtype=np.float64)
    num = float(np.dot(vector_a, vector_b))
    denom = np.linalg.norm(vector_a) * np.linalg.norm(vector_b)
    cos = num / denom
    sim = 0.5 + 0.5 * cos
    return sim

if __name__ == "__main__":
    a = get_sentence_vector("可以包邮吗")
    b = get_sentence_vector("能不能包邮")
    print(cos_sim(a, b))
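
The two test sentences both ask about free shipping ("可以包邮吗" / "能不能包邮"), so the printed similarity should come out high. A small illustrative extension (the extra candidate sentences below are made up for the example) is to rank several candidates against one query:

def rank_candidates(query, candidates):
    """Illustrative helper: sort candidate sentences by similarity to the query."""
    query_vector = get_sentence_vector(query)
    scored = [(c, cos_sim(query_vector, get_sentence_vector(c))) for c in candidates]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)


print(rank_candidates("可以包邮吗", ["能不能包邮", "什么时候发货", "这个有红色的吗"]))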