News search engine pipeline demo 1.0 (南方网)

Just a simple inverted-index + cosine-similarity demo.
To improve efficiency, the postings lists of the query terms can be merged as a union (a sketch follows below).
Other matching algorithms and weighting combinations remain to be tested.
Two versions: feature-vector (word frequency) encoding and TF-IDF.
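A minimal sketch of that union step, assuming a `postings` mapping from term to a list of article ids (the same shape as `Inverted_Index_List` built below):

def candidate_articles(terms, postings):
    # Union the postings lists of all query terms: every article containing
    # at least one term becomes a candidate for similarity scoring.
    ids = set()
    for t in terms:
        ids |= set(postings.get(t, []))
    return sorted(ids)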

# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
import torch
import re
import jieba
import newspaper
import pandas as pd
from tqdm import tqdm
import os.path
from os import listdir
# show all columns
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', None)
# set the display width of values to 100 (default is 50)
pd.set_option('display.max_colwidth', 100)


# %%
url = 'https://new.qq.com/omn/20200730/20200730A0TCON00.html'      # a Tencent News page (leftover test cell; 南方网 is set below)
south_paper = newspaper.build(url, language='zh')


# %%
url = 'http://www.southcn.com/'          # 南方网

# %% [markdown]
# # Find files

# %%
def find_file(key_word, dir_path=os.getcwd()):
    # Return the first file under dir_path whose path contains key_word.
    matches = [os.path.join(dir_path, f) for f in listdir(dir_path)
               if os.path.isfile(os.path.join(dir_path, f)) and key_word in os.path.join(dir_path, f)]
    if not matches:
        raise FileNotFoundError(key_word)
    return matches[0]

# %% [markdown]
# # Scrape news articles

# %%
def get_news_paper(url, filepath):
    south_paper = newspaper.build(url, language='zh', memoize_articles=False)    # build the news source
    strings = "Brand: {}  Description: {}  Total: {}".format(south_paper.brand, south_paper.description, len(south_paper.articles))
    news_title = []
    news_text = []
    news = south_paper.articles
    for i in tqdm(range(len(news)), desc=strings):    # iterate over every article link
        paper = news[i]
        try:
            paper.download()
            paper.parse()
            news_title.append(paper.title)     # store the article titles one by one
            news_text.append(paper.text)       # store the article bodies one by one
        except Exception:
            news_title.append('NULL')          # substitute NULL when the page cannot be fetched
            news_text.append('NULL')
            continue
    # store the scraped news in a data table
    south_paper_data = pd.DataFrame({'title': news_title, 'text': news_text})
    south_paper_data = south_paper_data.drop_duplicates(subset=['text'], keep='first')
    south_paper_data = south_paper_data.reset_index(drop=True)
    south_paper_data.to_csv(filepath, mode="a+", header=False)
    print("Collected <{}> articles in total".format(south_paper_data.shape[0]))
    return south_paper_data

# %% [markdown]
# # Static configuration

# %%
corpus = find_file("南方网 3.csv")
stop_word_path = find_file("stop_word_for_chinese.txt","/Users/manmanzhang/Library/Mobile Documents/com~apple~CloudDocs/MyProject/InferenceSystem/src/I5_algorithm/NLP数据集合/停词库/")
stop_word_path

# %% [markdown]
# # Deduplicate the local CSV

# %%
def drop_duplicates_csv(file_path):
    # Drop duplicate articles (by body text) from the local CSV;
    # return how many rows were removed.
    location_table = pd.read_csv(file_path)
    start = location_table.shape[0]
    location_table = location_table.drop_duplicates(subset=['text'], keep='first')
    location_table = location_table.reset_index(drop=True)
    location_table.to_csv(file_path)
    end = location_table.shape[0]
    return start - end
drop_duplicates_csv(corpus)


# %%


# %% [markdown]
# # Start collecting news data

# %%
news = get_news_paper(url,corpus)

# %% [markdown]
# # Data preprocessing

# %%
# temporarily remove characters from a text
def del_element(strings, symbols):
    srcrep = {i: '' for i in symbols}
    rep = dict((re.escape(k), v) for k, v in srcrep.items())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], strings)

# load the stopword list
with open(stop_word_path, 'r', encoding='utf-8') as f:
    stop_words = f.read().split('\n') + ['\n']

# filter out stopwords after tokenizing with jieba
def filter_stop_word(paper, stop_words):
    return np.array(list(filter(lambda x: x not in stop_words, jieba.cut(del_element(paper, '\n')))))

# read the locally stored news
def read_txt(corpus):
    return np.array([re.sub('\n', '', str(word)) for word in tqdm(pd.read_csv(corpus).text, desc='loading articles')])

# keep Chinese characters only
def just_chinese(strings):
    regStr = ".*?([\u4E00-\u9FA5]+).*?"
    expr = ''.join(re.findall(regStr, strings))
    if expr:
        return expr
    return '\n'
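# Illustrative behavior (input made up for the example): only the CJK runs
# survive and are concatenated:
#   just_chinese("2020年7月30日,GDP增长3.2%")  ->  '年月日增长'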

# tokenize
def split_word(original, temp_del=stop_words):
    result = []
    for paper in tqdm(original, desc='tokenizing articles'):
        chinese = just_chinese(paper)
        temp_split_words = filter_stop_word(chinese, temp_del)
        result.append(temp_split_words)
    return np.array(result, dtype=object)   # rows are ragged, so use an object array

# sort a dictionary by key (descending) without coercing the values to strings
def sort_dict(dict_items):
    return dict(sorted(dict_items.items(), key=lambda x: x[0], reverse=True))

'''Data preprocessing'''
def data_preprocessing(corpus):
    # read the raw articles
    read_original = read_txt(corpus)
    # tokenize every article
    init_paper = split_word(read_original, stop_words)
    # flatten all word lists into one dimension
    all_words = np.array([j for i in tqdm(init_paper, desc='flattening word lists') for j in i])
    # deduplicate the vocabulary
    word_vector = np.unique(all_words)
    # total word count
    m = all_words.size
    # relative frequency of each word: one boolean scan per vocabulary entry
    init_word_dict = {word: (all_words == word).dot(np.ones(m)) / m for word in tqdm(word_vector, desc='building frequency dictionary')}
    # build the sorted dictionary and the feature vector
    word_dict = sort_dict(init_word_dict)
    word_vector = np.array(list(word_dict))
    return word_dict, word_vector, read_original, init_paper
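
# Sketch (optional, equivalent): the frequency dictionary above costs one
# boolean scan per vocabulary word, O(V*N); a single np.unique pass over the
# same `all_words` array produces the same frequencies far more cheaply.
def build_freq_dict(all_words):
    uniq, counts = np.unique(all_words, return_counts=True)
    return dict(zip(uniq, counts / all_words.size))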


# %%
word_dict,word_vector,read_original,init_paper = data_preprocessing(corpus)

# %% [markdown]
# # TF-IDF word vectors

# %%
def TF(paper_words, word_vector):
    # term-frequency vector of one article over the global vocabulary
    m = word_vector.size
    init_TF = np.zeros(m)
    for word in paper_words:
        if word in word_vector:
            index_ = np.argwhere(word_vector == word)[0][0]
            init_TF[index_] += 1
    return init_TF

def IDF(paper_words_list, word_vector):
    # inverse document frequency per vocabulary entry
    m = word_vector.size
    init_IDF = np.zeros(m)
    N = paper_words_list.shape[0]   # number of documents
    n = -1
    for word in tqdm(word_vector, desc='IDF vocabulary'):
        n += 1
        for paper_arr in paper_words_list:
            if word in paper_arr:
                init_IDF[n] += 1
    return np.log(N / (init_IDF + 1))

def TFIDF(paper_words_list, word_vector):
    IDF_arr = IDF(paper_words_list, word_vector)
    TF_arr = np.array([TF(paper, word_vector) for paper in tqdm(paper_words_list, desc='TF matrix')])
    return TF_arr * IDF_arr, IDF_arr
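
# %% [markdown]
# With $N$ documents, term frequency $\mathrm{tf}(t,d)$ and document frequency
# $\mathrm{df}(t)$, the functions above compute
# $$\mathrm{tfidf}(t,d) = \mathrm{tf}(t,d)\cdot\log\frac{N}{\mathrm{df}(t)+1},$$
# where the $+1$ in the denominator avoids division by zero for terms that
# appear in no document.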

# %% [markdown]
# # TF-IDF encoding of the corpus

# %%
code_of_TFIDF, IDF_vec = TFIDF(init_paper, word_vector)   # corpus TF-IDF matrix and the shared IDF vector

# %% [markdown]
# # Feature dictionary editor

# %%
# build the bag-of-words frequency vector of an article
def feature_dictionary_editor(words):
    words_list = list(word_dict)   # feature vocabulary
    feature_dict = dict(zip(words_list, np.zeros(len(words_list))))   # feature dictionary
    for word in words:
        if word in words_list:
            feature_dict[word] += 1
    return np.array([frequency for word, frequency in feature_dict.items()])
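
# %%
# Illustrative: the returned vector is aligned with the key order of word_dict,
# so an input repeating one vocabulary word twice yields 2.0 at that word's
# index, 1.0 at the index of each word seen once, and 0 elsewhere.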


# %%
code_of_TFIDF.shape


# %%
arr = init_paper[2]   # a sample token array used to test find_index below


# %%
def try_index(n, arr):
    # True when article n's token array equals arr
    try:
        return (init_paper[n] == arr).all()
    except Exception:
        return False

def find_index(arr):
    # linear scan for the row index of a token array in init_paper
    for i in range(init_paper.shape[0]):
        if try_index(i, arr):
            return i
find_index(arr)

# %% [markdown]
# # Build the inverted index

# %%
def inverted_index(paper, word_vector):
    # map each vocabulary word to the set of article ids containing it
    result = dict()
    for n, words in enumerate(tqdm(paper, desc='indexing articles')):
        for j in words:
            if j in word_vector:
                result.setdefault(j, set()).add(n)
    return {i: list(result[i]) for i in result}

Inverted_Index_List = inverted_index(init_paper, word_vector)
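
# %%
# Quick sanity check (the term here is chosen only for illustration): the
# postings list of one word, i.e. the ids of all articles containing it.
Inverted_Index_List.get('经济', [])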

# %% [markdown]
# # Search engine module

# %%
# look up the query terms in the inverted index
def search_inverted_index(strings, Inverted_Index_List):
    words_for_search = []
    split_word_for_search = [word for word in jieba.cut_for_search(strings) if word not in stop_words]
    print(split_word_for_search)
    for word in split_word_for_search:
        if word in Inverted_Index_List:
            print("\nSearch term:", word, "\nArticle ids:", Inverted_Index_List[word])
            words_for_search += Inverted_Index_List[word]
    return np.unique(np.array(words_for_search)), split_word_for_search

# cosine similarity
def cosine(s1, s2):
    return s1.dot(s2) / (np.linalg.norm(s1) * np.linalg.norm(s2))
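
# %% [markdown]
# Cosine similarity of two term vectors:
# $$\cos(s_1, s_2) = \frac{s_1\cdot s_2}{\lVert s_1\rVert\,\lVert s_2\rVert}.$$
# An all-zero vector (e.g. a query containing no vocabulary words) makes the
# denominator 0 and the result NaN, so callers should guard against empty queries.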

# %% [markdown]
# # Feature-vector search entry point

# %%

def search(key, Inverted_Index_List):
    search_paper_index, search_word = search_inverted_index(key, Inverted_Index_List)
    search_result = []
    search_prob = feature_dictionary_editor(search_word)   # word vector of the query
    change_word_vector_from_words = init_paper[search_paper_index]
    change_paper_from_words = read_original[search_paper_index]
    for i in tqdm(range(len(change_paper_from_words)), desc='articles scored'):
        word_arr, paper = change_word_vector_from_words[i], change_paper_from_words[i]
        paper_prob = feature_dictionary_editor(word_arr)   # word vector of the current article
        cos = cosine(paper_prob, search_prob)   # cosine similarity
        search_result.append([cos, paper])

    # build the frame from the list directly so "cos" stays numeric and sorts by value
    result_table = pd.DataFrame(search_result, columns=["cos", "newspaper"])
    sort_table = result_table.sort_values(["cos"], ascending=False).reset_index(drop=True)
    file_path = os.path.join(os.getcwd(), key)
    sort_table.to_csv(file_path)
    return sort_table


# %%
TFsearch = search("字节跳动",Inverted_Index_List)
TFsearch

# %% [markdown]
# # TF-IDF vector search entry point

# %%

def search_TFIDF(key, Inverted_Index_List):
    search_paper_index, search_word = search_inverted_index(key, Inverted_Index_List)
    search_result = []
    TFIDF_search = TF(search_word, word_vector) * IDF_vec   # TF-IDF vector of the query
    change_word_vector_from_words = init_paper[search_paper_index]
    change_paper_from_words = read_original[search_paper_index]
    for i in tqdm(range(len(change_paper_from_words)), desc='articles scored'):
        word_arr, paper = change_word_vector_from_words[i], change_paper_from_words[i]
        TFIDF_ROW = code_of_TFIDF[find_index(word_arr)]   # TF-IDF vector of the current article
        cos = cosine(TFIDF_ROW, TFIDF_search)   # cosine similarity
        search_result.append([cos, paper])
    # keep "cos" numeric so the sort is by value, not by string
    result_table = pd.DataFrame(search_result, columns=["cos", "newspaper"])
    sort_table = result_table.sort_values(["cos"], ascending=False).reset_index(drop=True)
    file_path = os.path.join(os.getcwd(), key)
    sort_table.to_csv(file_path)
    return sort_table
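
# Note (sketch): search_paper_index already holds the corpus row ids, so the
# per-article TF-IDF row could also be read directly as
# code_of_TFIDF[search_paper_index[i]], skipping the linear find_index scan.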

TFIDFsearch = search_TFIDF(input(), Inverted_Index_List)
TFIDFsearch


# %%




