Web Scraping in Practice: Crawling Housing and Construction Policy Announcements

import json
import os
import re
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup

# Return today's date and yesterday's date, both truncated to midnight
def get_today_and_lastday():
    t = datetime.now()
    oneday = timedelta(days=1)
    yesterday = t - oneday
    return datetime(year=t.year, month=t.month, day=t.day), datetime(
        year=yesterday.year, month=yesterday.month, day=yesterday.day
    )
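
A quick illustration of the pair this returns (the concrete date below is hypothetical):

# Assuming "now" is 2024-05-02 10:30:
#   begin_time, end_time = get_today_and_lastday()
#   begin_time -> datetime(2024, 5, 2, 0, 0)   # today at midnight
#   end_time   -> datetime(2024, 5, 1, 0, 0)   # yesterday at midnight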

# Parse a "%Y-%m-%d %H:%M:%S" string into a datetime
def str_to_datetime(s):
    return datetime.strptime(s, "%Y-%m-%d %H:%M:%S")

# Collect the <li> items on a listing page, paging deeper until items older than end_time appear
def get_l_list(url, end_time, page=1):
    if page != 1:
        crawl_url = url + "_%s" % page + ".htm"
    else:
        crawl_url = url + ".htm"
    request = requests.get(crawl_url, headers=head)
    soup = BeautifulSoup(request.text, "html.parser")
    article_list = soup.find_all("ul", {"class": "news-list clearfix"})[0]
    li_list = article_list.find_all("li")
    # Publish date of the last (oldest) item on this page, padded to midnight
    publish_time = str_to_datetime(li_list[-1].find_all("em")[0].text + " 00:00:00")
    # Stop paging once the oldest item on this page already predates the cutoff
    if publish_time < end_time:
        return li_list
    # Otherwise the time window may extend onto later pages: recurse and append
    page += 1
    li_list += get_l_list(url, end_time, page)
    return li_list
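
From the code above, page 1 of a section is fetched as <url>.htm and deeper pages as <url>_<page>.htm, and the recursion keeps paging until the oldest item on a page predates end_time. For example (URL pattern inferred from the code, not verified against the site):

# get_l_list("http://js.shaanxi.gov.cn/zixun/list2006", end_time) would request
#   page 1 -> http://js.shaanxi.gov.cn/zixun/list2006.htm
#   page 2 -> http://js.shaanxi.gov.cn/zixun/list2006_2.htm
#   ...and so on, until the cutoff is reached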

# Visit the links inside each <li> and append the article details to db_insert_data
def crawl_li_list(li_list, db_insert_data):
    for li in li_list:
        publish_time = str_to_datetime(li.find_all("em")[0].text + " 00:00:00")
        # Keep only items published within [end_time, begin_time), i.e. yesterday
        if end_time <= publish_time < begin_time:
            a_list = li.find_all("a")
            for a in a_list:
                a_url = a.get("href")
                # Skip anchors without an href as well as non-relative links
                if not a_url or not a_url.startswith("/"):
                    continue
                a_request = requests.get(init_url + a_url, headers=head)
                a_request.encoding = "gb2312"
                a_soup = BeautifulSoup(a_request.text, "html.parser")
                # Image and attachment URLs inside the article body
                # (skip tags that lack a src/href attribute)
                img_url = [
                    init_url + j.get("src")
                    for i in a_soup.find_all("div", {"class": "editor-body"})
                    for j in i.find_all("img")
                    if j.get("src")
                ]
                attach_url = [
                    init_url + j.get("href")
                    for i in a_soup.find_all("div", {"class": "editor-body"})
                    for j in i.find_all("a")
                    if j.get("href")
                ]
                file_path = img_url + attach_url
                db_insert_data.append(
                    [
                        # crawl_time
                        datetime.now(),
                        # publish_time
                        str_to_datetime(
                            a_soup.find_all("div", {"class": "info"})[0]
                            .find_all("span")[0]
                            .text.split(":")[1]
                            .replace("/", "-")
                        ),
                        # source URL
                        init_url + a_url,
                        # site section (last breadcrumb entry)
                        a_soup.find_all("div", {"class": "location"})[0]
                        .find_all("a")[-1]
                        .text,
                        # title
                        a_soup.find_all("h1")[0].text,
                        # author or source
                        "陕西省住房和城乡建设局",
                        # article body text
                        "".join(
                            [
                                i.text
                                for i in a_soup.find_all(
                                    "div", {"class": "editor-body"}
                                )
                            ]
                        ),
                        # attachments: stored attachment URLs (media/app03/data/)
                        "\n".join(file_path),
                    ]
                )


# begin_time = today at 00:00, end_time = yesterday at 00:00 (the crawl window is yesterday)
begin_time, end_time = get_today_and_lastday()
head = {
    "User-Agent": np.random.choice(
        [
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/13.0.782.215)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.1; SV1; .NET CLR 2.8.52393; WOW64; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A",
            "Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10",
            "Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3",
        ],
    ),
    "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
}
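
Note that np.random.choice above runs once at module load, so every request in a run reuses the same User-Agent. If a fresh User-Agent per request is preferred, a small helper along these lines could be used instead (make_headers is a hypothetical addition, not part of the original script):

import random

# A small example pool; any list of User-Agent strings would do
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
]

def make_headers():
    # Build headers on demand so each request can pick a different User-Agent
    return {
        "User-Agent": random.choice(USER_AGENTS),
        "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
    }

# usage: requests.get(crawl_url, headers=make_headers())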

# Site root, used to resolve relative links
init_url = "http://js.shaanxi.gov.cn"
# Listing pages (news sections) to crawl
url_list = [
    "http://js.shaanxi.gov.cn/zixun/list2006",
    "http://js.shaanxi.gov.cn/zixun/list2080",
    "http://js.shaanxi.gov.cn/zixun/list2077",
    "http://js.shaanxi.gov.cn/zixun/list2010",
]


def spider():
    # Crawl every listing page and gather yesterday's articles into one list of rows
    db_insert_data = []
    for url in url_list:
        li_list = get_l_list(url, end_time)
        crawl_li_list(li_list, db_insert_data)
    return db_insert_data
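
For completeness, a minimal usage sketch showing how the collected rows could be written out with the already-imported pandas; the column names and the output file name are assumptions made here for illustration, since the original presumably inserts the rows into a database:

if __name__ == "__main__":
    rows = spider()
    columns = [
        "crawl_time", "publish_time", "source_url", "section",
        "title", "source", "content", "attachments",
    ]
    # Hypothetical export path; adjust to the real storage target
    pd.DataFrame(rows, columns=columns).to_csv(
        "zhujian_policy_yesterday.csv", index=False, encoding="utf-8-sig"
    )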

Output:
[Screenshot of the crawled results; the original image is omitted here]

