Python crawler | Auto-replacing titles when scraping articles

When scraping articles, the title is replaced automatically. This is not pseudo-original title spinning; instead, each title is swapped for a related, similar one pulled from Baidu's suggestion dropdown (suggest words / related keywords).
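The replacement comes from Baidu's suggestion endpoint. A look at its raw response explains the string surgery in translate() further down: the payload is JSONP with unquoted keys, so it is not valid JSON as returned (the query and suggestion values here are illustrative only):

# GET http://suggestion.baidu.com/su?wd=<keyword>
# Raw response (illustrative values); the 17-character prefix 'window.baidu.sug('
# and the trailing ');' are exactly what response.text[17:-2] strips off:
# window.baidu.sug({q:"宝宝辅食",p:false,s:["宝宝辅食食谱","宝宝辅食怎么做"]});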

from lxml import etree
import re, os, json
import requests
import random

# Pick a random User-Agent for each request
def header():
    head = [
        {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0"},
        {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:84.0) Gecko/20100101 Firefox/84.0"},
        {"User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0"},
        {"User-Agent": "Opera/9.80 (Windows NT 6.1; U; en-GB) Presto/2.7.62 Version/11.00"},
        {"User-Agent": "Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00"},
        {'User-Agent': "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00"}
    ]

    return random.choice(head)



# Write the article text to a local file
def local(title, article):
    heading = translate(title)
    # Strip characters that are illegal or awkward in a filename
    heading = re.sub(r'(”|?|!|“|:|,|【|】|《|》|\s|\*|@|#|%|=|\?|;|`|、|/|{|}|\\|\^|&|~|:|—|\+|")', '', heading)
    path = os.path.join(os.path.expanduser('~'), "Desktop", '采集库')
    # Create the '采集库' folder on the desktop if it does not exist yet
    os.makedirs(path, exist_ok=True)
    # Save the article as a .txt file named after the replaced heading
    with open(os.path.join(path, f'{heading}.txt'), 'w', encoding='utf-8') as f:
        f.write(article)
        print(f'[{title}] saved successfully')

# Baidu suggestion (dropdown) words
def translate(title):
    response = requests.get(f'http://suggestion.baidu.com/su?wd={title}', headers=header())
    # The response is JSONP with unquoted keys; strip the wrapper and quote
    # the keys (q, p, s) so json.loads can parse the payload
    payload = (response.text[17:-2]
               .replace('q:', '"q":')
               .replace('p:false', '"p":"false"')
               .replace('s:', '"s":')
               .replace('p:true', '"p":"true"'))
    data = json.loads(payload)
    if data['s']:
        # Pick suggestions at random from the dropdown list
        random_title = random.choice(data['s'])
        random_title_two = random.choice(data['s'])
        if len(random_title) < 12:
            # A short suggestion gets a second one appended in parentheses
            return random_title + '(' + random_title_two + ')'
        return random_title
    # No suggestions returned: fall back to the original title
    return title
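One caveat (my own note, not from the original): the replace chain above corrupts the payload if a suggestion string ever contains a bare 'q:' or 's:'. A slightly more defensive sketch, reusing the re and json imports at the top, pulls out just the s array:

# Hedged alternative to the replace chain; assumes the usual payload shape
def suggestions(raw):
    # raw looks like: window.baidu.sug({q:"...",p:false,s:["a","b"]});
    m = re.search(r's:(\[.*\])', raw)
    return json.loads(m.group(1)) if m else []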


The Python code above can be used as-is; nothing in it needs to be modified.

# Crawler code (using 太平洋亲子 / PCbaby as the example site)
def get_html(num):
    headers = header()
    for page in range(1, num):
        url = f'https://edu.pcbaby.com.cn/brain/family/index_{page}.html'
        html = requests.get(url, headers=headers)
        html = etree.HTML(html.text)
        # Extract the article links from the list page
        lis = html.xpath('//div[@id="JaList"]/ul/li/dl[@class="aListDl"]/dd[@class="oh"]/p[@class="aList-title"]/a/@href')
        for link in lis:
            url = f'https:{link}'
            print('Collected link:', url)
            headers = header()  # rotate the User-Agent for every article
            html = requests.get(url, headers=headers)
            html.encoding = 'gbk'
            if html.status_code != 404:
                html = etree.HTML(html.text)
                # Parse the article title (adjust the xpath for other sites)
                title = html.xpath('//h1[@class="artTit"]')[0].xpath('string(.)')

                # Parse the article body (adjust the xpath for other sites)
                article = html.xpath('//div[@class="artText"]')[0].xpath('string(.)')

                # Strip unwanted punctuation from the title, then map the
                # range characters '-' and '~' to '至' and drop '|'
                title = re.sub(r'(”|?|!|“|:|,|【|】|《|》|\s|(|)|\(|\)|\*|@|#|%|=|\?|;|`|、)', '', title)
                title = title.replace('-', '至').replace('~', '至').replace('~', '至').replace('|', '').replace('-', '至')
                local(title, article)
            else:
                print('URL could not be opened, skipping this page')
                continue
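To aim the crawler at a different site, only the list-page URL pattern and the two xpath expressions for the title and body need to change. A hypothetical example (these selectors are made up; inspect the target page's real markup first):

# Hypothetical selectors for some other site's article pages:
# title = html.xpath('//h1[@class="post-title"]')[0].xpath('string(.)')
# article = html.xpath('//div[@class="post-content"]')[0].xpath('string(.)')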



if __name__ == '__main__':
    # number of list pages to crawl
    get_html(99)