人民日报——全站图文数据库信息采集

支持 2020–2023 年全部图文数据的采集。
(原文此处为示例截图,抓取时未能保留。)

#!/usr/bin/python3
# -*- coding:utf-8 -*-
import requests
import re
from lxml import etree
from sdk.utils.util_decorate import retry

@retry(retry=3,sleep=5)
def get_html(url, timeout=10):
    """Fetch *url* and return its body decoded as UTF-8.

    Args:
        url: Absolute URL of the page to download.
        timeout: Seconds to wait for the server before aborting
            (new, defaulted parameter — existing call sites are unchanged).

    Returns:
        The page text on HTTP 200, otherwise the sentinel string
        "ERROR" (callers compare against it rather than catching).
    """
    # FIX: a request without a timeout can block forever and stall the
    # whole crawl; the project @retry decorator re-invokes on exceptions,
    # so a timed-out request is retried up to 3 times.
    response = requests.get(url, timeout=timeout)
    response.encoding = "utf-8"
    if response.status_code == 200:
        return response.text
    else:
        # Surface the failing status so missing dates/pages are visible.
        print(response.status_code)
        return "ERROR"

def get_text(text):
    """Strip whitespace noise out of scraped text.

    Args:
        text: A single string, or a list of strings as returned by an
            lxml ``xpath('...text()')`` query.

    Returns:
        The cleaned string; for a list, the cleaned pieces joined into
        one string; ``None`` for any other input type (this preserves
        the original implicit fall-through contract).
    """
    # Compile once per call instead of once per list element; the
    # pattern string is kept byte-identical to the original (it removes
    # CR, LF, TAB and two space-like characters).
    cleaner = re.compile("\\r|\\n|\\t| | ")
    if isinstance(text, str):
        return cleaner.sub("", text).strip(" ")
    if isinstance(text, list):
        return "".join(cleaner.sub("", piece).strip(" ") for piece in text)
    return None


def anlise_detail(detail_html):
    """Parse one article detail page and print title, meta, body and image URLs.

    Side effects only: everything is written to stdout; nothing is returned.
    """
    tree = etree.HTML(detail_html)
    # Old and new paper templates use different container classes.
    for article in tree.xpath('//div[@class="article"]|//div[@class="text_c"]'):
        title = get_text(article.xpath('./h1/text()'))
        print("标题", title)
        subtitle = get_text(article.xpath('./h2/text()')).strip("\n")
        if subtitle:
            print("副标题", subtitle)
        publish_info = get_text(article.xpath(
            './/span[@class="date"]/text()|//div[@class="lai"]//text()'))
        print("文章信息", publish_info)
        print(get_text(article.xpath('.//div[@id="ozoom"]//p/text()')))
        # Relative image paths (../../../...) are rebased onto the paper's
        # site root; absolute http:// URLs pass through untouched.
        images = []
        for src in article.xpath('.//img/@src'):
            if src.startswith("http://"):
                images.append(src)
            else:
                images.append(src.replace("../../../", "http://paper.people.com.cn/rmrb/"))
        if images:
            print(images)


# ---- crawl driver: every issue from 2020-01-01 through 2023-12-31 ----
year_list = [str(i) for i in range(2020, 2024)]
month_list = [str(i).zfill(2) for i in range(1, 13)]
# 1..31 for every month; non-existent dates simply fail to fetch and are skipped.
day_list = [str(i).zfill(2) for i in range(1, 32)]

# Index-page link titles come wrapped in document.write(view("...")) JS calls.
# Raw string keeps \. and \( as regex escapes (the original non-raw string
# triggers invalid-escape warnings); compiled once instead of per link.
_title_pattern = re.compile(r'document\.write\(view\("(.*?)"\)\)')

for year in year_list:
    for month in month_list:
        for day in day_list:
            head = "http://paper.people.com.cn/rmrb/html/{}-{}/{}/".format(year, month, day)
            for page_no in range(1, 21):  # an issue has at most ~20 numbered pages
                url = "{}nbs.D110000renmrb_{}.htm".format(head, str(page_no).zfill(2))
                # FIX: get_html returns the page text (or "ERROR") as a plain
                # str; the original `get_html(url)["msg"]` subscripted that str
                # and raised TypeError on the first request. (If the sdk
                # @retry decorator is meant to wrap results in {"msg": ...},
                # confirm against its source — TODO.)
                html = get_html(url)
                if html == "ERROR":
                    continue
                tree = etree.HTML(html)
                for ul in tree.xpath('//div[@class="news"]/ul|//div[@id="titleList"]/ul'):
                    detail_url_list = ul.xpath('./li/a/@href')
                    name_list = ul.xpath('./li/a//text()')
                    for name, _url in zip(name_list, detail_url_list):
                        detail_url = "{}{}".format(head, _url)
                        matches = _title_pattern.findall(name)
                        if not matches:
                            # Robustness: the original findall(...)[0] raised
                            # IndexError on a non-matching title; skip instead.
                            continue
                        print(matches[0].strip(), detail_url)
                        detail_html = get_html(detail_url)
                        if detail_html != "ERROR":
                            anlise_detail(detail_html)
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值