Medical Word Vector Training --- Corpus Acquisition

This project trains medical word vectors on Q&A data from 39问医生 (ask.39.net): the question posts serve as the training corpus, the scraped tag labels are added to the tokenizer's vocabulary, and the vectors are trained with word2vec.
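To make this pipeline concrete, here is a minimal training sketch (not the project's actual training code). It assumes the crawl below has already been flattened into a one-question-per-line file questions.txt and a label list labels.txt (both placeholder names; see the helper after the crawler code), registers each label with jieba so multi-character medical terms survive segmentation intact, and trains skip-gram vectors with gensim (>= 4.0).

# Minimal word2vec training sketch; file names and hyperparameters
# are illustrative placeholders, not the project's actual settings.
import jieba
from gensim.models import Word2Vec

# Register every scraped label so jieba keeps multi-character medical
# terms (e.g. "高血压") as single tokens instead of splitting them.
with open('labels.txt', encoding='utf8') as f:
    for label in f:
        if label.strip():
            jieba.add_word(label.strip())

# One question per line; each segmented question is one training sentence.
with open('questions.txt', encoding='utf8') as f:
    sentences = [list(jieba.cut(line.strip())) for line in f if line.strip()]

model = Word2Vec(sentences, vector_size=100, window=5, min_count=2, sg=1)
model.save('medical_word2vec.model')

Here sg=1 selects skip-gram, which generally handles rare domain terms better than CBOW; all hyperparameters are placeholders to be tuned against the real corpus.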
The 39问医生 (ask.39.net) corpus

Corpus crawler

Code link

# encoding: utf-8
import re
import requests
from bs4 import BeautifulSoup
import json
import time
import os


class Spider39Question:
    """
    Crawler for the 39问医生 (ask.39.net) Q&A pages.
    """

    def __init__(self):
        # Department name -> 39.net channel id used in the list-page URLs.
        self.department = {"内科": "313", "外科": "321", "妇产科": "320", "儿科": "309", "男科": "322", "中医科": "3163",
                           "五官科": "323", "不孕不育": "3157", "皮肤性病科": "319465592", "精神心理科": "3166", "肿瘤科": "3162",
                           "减肥": "27", "整形美容": "3165", "传染病科": "311"}
        self.path = '../../词向量/data/39url/'  # per-department URL lists
        self.save_path = '../../词向量/data/content/'  # per-question JSON files
        self.headers = {

            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,'
                      '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            "Host": "ask.39.net",
            "Pragma": "no-cache",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/86.0.4240.111 Safari/537.36"}

    def get_url(self):
        """
        Walk the paginated list pages of every department, collect the
        question URLs, and save them per department as a JSON file.
        """
        for key, value in self.department.items():
            result = []
            for i in range(1, 1001):  # up to 1,000 list pages per department
                try:
                    response = requests.get("http://ask.39.net/news/" + value + "-" + str(i) + ".html",
                                            headers=self.headers, timeout=5)
                    soup = BeautifulSoup(response.text, 'html.parser')
                    for element in soup.find_all(class_="list_ask list_ask2"):
                        for element_one in element.find_all("li"):
                            for element_two in element_one.find_all(class_="a_l"):
                                for element_three in element_two.find_all(class_="p1"):
                                    for element_four in element_three.find_all("a"):
                                        result.append("http://ask.39.net" + element_four["href"])
                    print(str(i) + "++++++++++++成功")
                except Exception as e:
                    print(e)
                    print(str(i) + "++++++++++++失败")
                time.sleep(0.1)
            with open(self.path + key + '.json', "w",
                      encoding="utf8", errors="ignore") as dump_f:
                json.dump({key: result}, dump_f, ensure_ascii=False, indent=2)
            print(key + "---------------------已完成")

    def get_single(self, url):
        """
        Fetch one question page and extract its breadcrumb departments,
        title, body text, and keyword labels.
        """
        result = {"type": [], "title": '', "content": '', "key_word": []}
        response = requests.get(url,
                                headers=self.headers, timeout=5)
        soup = BeautifulSoup(response.text, 'html.parser')
        type_ = []
        for element in soup.find_all(class_="cont_l"):
            for element_one in element.find_all(class_="sub"):
                for element_two in element_one.find_all("span"):
                    for element_three in element_two.find_all("a"):
                        type_.append(element_three.get_text())
            for element_four in element.find_all(class_="ask_cont"):
                for element_five in element_four.find_all(class_="ask_tit"):
                    result["title"] = re.sub("[\n\r\t ]", "", element_five.get_text())
                for element_six in element_four.find_all(class_="ask_hid"):
                    result["content"] = re.sub("[\n\r\t ]", "", element_six.get_text())
            for element_seven in element.find_all(class_="txt_label"):
                for element_eight in element_seven.find_all("a"):
                    result["key_word"].append({"url": "http://ask.39.net" + element_eight['href'],
                                               "label": re.sub("[\n\r\t ]", "", element_eight.get_text())})
        result["type"] = type_[1:]
        return result

    def get_all(self):
        """
        Download every question collected by get_url() and save each one
        as its own JSON file, grouped into one folder per department.
        """
        for file in os.listdir(self.path):
            with open(self.path + file, encoding='utf8') as url_file:
                urls = json.load(url_file)[file.replace(".json", "")]
                os.makedirs(self.save_path + file.replace(".json", ""), exist_ok=True)
                for count, element in enumerate(urls):
                    try:
                        result = self.get_single(element)
                        with open(self.save_path + file.replace(".json", "") + '/' + str(count) + '.json', "w",
                                  encoding="utf8", errors="ignore") as dump_f:
                            json.dump(result, dump_f, ensure_ascii=False, indent=2)
                        print(str(count) + "---------------------已完成")
                    except Exception as e:
                        print(e)
                        print(str(count) + "--------------------- 失败")
                    time.sleep(0.1)


if __name__ == '__main__':
    # get_url() must have been run once first to build the URL lists.
    Spider39Question().get_all()
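Once get_all() finishes, save_path holds one JSON file per question, grouped into one folder per department. A helper along these lines (a sketch, not part of the original code; the output names are the placeholders used in the training sketch above) flattens those files into questions.txt and labels.txt:

# Sketch: flatten the scraped per-question JSON files into a corpus
# file and a label list. Assumes the directory layout produced by
# get_all(); output file names are illustrative placeholders.
import json
import os

save_path = '../../词向量/data/content/'
labels = set()
with open('questions.txt', 'w', encoding='utf8') as q_f:
    for department in os.listdir(save_path):
        folder = os.path.join(save_path, department)
        for name in os.listdir(folder):
            with open(os.path.join(folder, name), encoding='utf8') as f:
                record = json.load(f)
            # Title plus body text becomes one corpus line per question.
            q_f.write(record['title'] + record['content'] + '\n')
            labels.update(kw['label'] for kw in record['key_word'])
with open('labels.txt', 'w', encoding='utf8') as l_f:
    l_f.write('\n'.join(sorted(labels)))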

The crawled results are not convenient to post publicly; contact me if you need them.
