Automatically replace article titles while scraping. This is not pseudo-original title spinning: each original title is swapped for a related, similar one taken from Baidu dropdown suggestions / related keywords (see the usage sketch after the helper functions below).
from lxml import etree
import re, os, json
import requests
import random
# Pick a random User-Agent for each request
def header():
    head = [
        {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0"},
        {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:84.0) Gecko/20100101 Firefox/84.0"},
        {"User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0"},
        {"User-Agent": "Opera/9.80 (Windows NT 6.1; U; en-GB) Presto/2.7.62 Version/11.00"},
        {"User-Agent": "Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00"},
        {"User-Agent": "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00"}
    ]
    return random.choice(head)
# Write the article text to a local file
def local(title, article):
    heading = translate(title)
    # Strip characters that are unwanted or illegal in file names
    heading = re.sub(r'(”|?|!|“|:|,|【|】|《|》|\s|\*|@|#|%|=|\?|;|`|、|/|{|}|\\|\^|&|~|:|—|\+|")', '', heading)
    path = os.path.join(os.path.expanduser('~'), "Desktop", '采集库')
    if os.path.exists(path):
        with open(os.path.join(path, f'{heading}.txt'), 'w', encoding='utf-8') as f:
            f.write(article)
            print(f'[{title}] saved successfully')
    else:
        # Create the '采集库' (collection) folder on the Desktop first
        os.makedirs(path)
        # Then write the txt file into it
        with open(os.path.join(path, f'{heading}.txt'), 'w', encoding='utf-8') as f:
            f.write(article)
            print(f'[{title}] saved successfully')
# Baidu dropdown (suggestion) words
def translate(title):
    response = requests.get(f'http://suggestion.baidu.com/su?wd={title}', headers=header())
    # Strip the JSONP wrapper "window.baidu.sug(...);" and quote the bare keys so the payload parses as JSON
    response = response.text[17:-2].replace('q:', '"q":').replace('p:false', '"p":"false"') \
        .replace('s:', '"s":').replace('p:true', '"p":"true"')
    data = json.loads(response)
    if data['s']:
        # Pick one (or two) random suggestions to build the new heading
        random_title = random.choice(data['s'])
        random_title_two = random.choice(data['s'])
        if len(random_title) < 12:
            heading = random_title + '(' + random_title_two + ')'
            return heading
        else:
            heading = random_title
            return heading
    else:
        # No suggestions returned: fall back to the original title
        heading = title
        return heading
The Python code above does not need to be modified.
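To see what the replacement looks like before wiring it into the crawler, translate() can be called on its own. A minimal sketch, assuming the functions above are defined in the same file; the sample query is a made-up illustration, not part of the original code:

# Standalone check of the suggestion-based title replacement
sample_title = '宝宝注意力不集中'  # hypothetical sample query
print(translate(sample_title))  # prints a related suggestion, or the original title if Baidu returns none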
# Crawler code (using the PCbaby parenting site, edu.pcbaby.com.cn, as an example)
def get_html(num):
    headers = header()
    for i in range(1, num):
        url = f'https://edu.pcbaby.com.cn/brain/family/index_{i}.html'
        html = requests.get(url, headers=headers)
        html = etree.HTML(html.text)
        # Extract the article links from the list page
        lis = html.xpath('//div[@id="JaList"]/ul/li/dl[@class="aListDl"]/dd[@class="oh"]/p[@class="aList-title"]/a/@href')
        for link in lis:
            url = f'https:{link}'
            print('Collected link:', url)
            headers = header()
            html = requests.get(url, headers=headers)
            html.encoding = 'gbk'  # this site serves GBK-encoded pages
            if html.status_code != 404:
                html = etree.HTML(html.text)
                # Parse the article title (adjust the XPath for other sites)
                title = html.xpath('//h1[@class="artTit"]')[0].xpath('string(.)')
                # Parse the article body (adjust the XPath for other sites)
                article = html.xpath('//div[@class="artText"]')[0].xpath('string(.)')
                # Remove punctuation from the title and normalise range dashes to '至'
                title_sub = re.sub(r'(”|?|!|“|:|,|【|】|《|》|\s|(|)|\(|\)|\*|@|#|%|=|\?|;|`|、)', '', title)
                title = (title_sub.replace('-', '至').replace('~', '至').replace('~', '至')
                         .replace('|', '').replace('-', '至').replace('?', '').replace('?', ''))
                local(title, article)
            else:
                print('URL could not be opened, skipping this page')
                continue
if __name__ == '__main__':
    # Number of list pages to crawl: range(1, 99) requests index_1.html through index_98.html
    get_html(99)
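For a first run it is easier to check the pipeline on a couple of list pages before crawling everything; the page count below is just an example, not part of the original script:

# Test run: requests only index_1.html and index_2.html, since get_html(num) iterates range(1, num)
get_html(3)

Each successfully parsed article ends up as a .txt file in the 采集库 folder on the Desktop, named after the suggestion-based heading.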