import json
import os
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
# 获取昨天的时间和今天的时间
def get_today_and_lastday():
    """Return (today at midnight, yesterday at midnight) as naive datetimes.

    Used to delimit the crawl window: articles published in [yesterday, today).
    """
    now = datetime.now()
    today = datetime(now.year, now.month, now.day)
    # Midnight minus 24h is yesterday's midnight (naive datetimes, no DST math).
    return today, today - timedelta(days=1)
# 字符串转换时间格式
def str_to_datetime(s):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a naive datetime."""
    fmt = "%Y-%m-%d %H:%M:%S"
    return datetime.strptime(s, fmt)
# 获取给定截止时间后的li标签列表
def get_l_list(url, end_time, page=1):
    """Collect <li> news entries from the paginated listing at *url*.

    Fetches pages ``{url}.htm``, ``{url}_2.htm``, ... and accumulates every
    <li> until the last item on a page was published before *end_time*
    (assumes each page is ordered newest-first — TODO confirm).

    :param url: listing base URL without the ``.htm`` suffix
    :param end_time: stop once an article older than this datetime is seen
    :param page: page number to start from (kept for interface compatibility)
    :return: list of bs4 <li> tags, possibly spanning several pages
    """
    li_list = []
    # Iterative instead of recursive: avoids RecursionError on long histories.
    while True:
        crawl_url = f"{url}.htm" if page == 1 else f"{url}_{page}.htm"
        # timeout so a stalled server cannot hang the crawl forever
        response = requests.get(crawl_url, headers=head, timeout=30)
        soup = BeautifulSoup(response.text, "html.parser")
        article_list = soup.find_all("ul", {"class": "news-list clearfix"})[0]
        page_items = article_list.find_all("li")
        li_list += page_items
        # Publish date lives in the last <em>; pages list newest articles first,
        # so once the oldest item on this page predates end_time we are done.
        publish_time = str_to_datetime(
            page_items[-1].find_all("em")[0].text + " 00:00:00"
        )
        if publish_time < end_time:
            return li_list
        page += 1
# 爬取li标签下跳转链接信息
def crawl_li_list(li_list, db_insert_data):
    """Fetch each article linked from *li_list* and append DB rows in place.

    Only articles with end_time <= publish date < begin_time (i.e. yesterday's
    articles — see module globals) are fetched. For each article a row of
    [crawl_time, publish_time, source URL, site section, title, author/source,
    body text, newline-joined attachment URLs] is appended to *db_insert_data*.

    :param li_list: bs4 <li> tags from the listing pages
    :param db_insert_data: list mutated in place; one row appended per article
    """
    for li in li_list:
        publish_time = str_to_datetime(li.find_all("em")[0].text + " 00:00:00")
        # Guard clause: skip anything outside the crawl window.
        if not (end_time <= publish_time < begin_time):
            continue
        for a in li.find_all("a"):
            a_url = a.get("href")
            # <a> tags may lack href entirely (get() returns None) — the old
            # a_url[0] check crashed on those. Only follow site-relative links.
            if not a_url or not a_url.startswith("/"):
                continue
            # timeout so one dead article page cannot stall the whole crawl
            a_request = requests.get(init_url + a_url, headers=head, timeout=30)
            a_request.encoding = "gb2312"
            a_soup = BeautifulSoup(a_request.text, "html.parser")
            bodies = a_soup.find_all("div", {"class": "editor-body"})
            # Resolve image/attachment URLs against the site root; skip tags
            # whose src/href is missing (None would break the concatenation).
            img_url = [
                init_url + img.get("src")
                for body in bodies
                for img in body.find_all("img")
                if img.get("src")
            ]
            attach_url = [
                init_url + link.get("href")
                for body in bodies
                for link in body.find_all("a")
                if link.get("href")
            ]
            file_path = img_url + attach_url
            db_insert_data.append(
                [
                    # crawl_time
                    datetime.now(),
                    # publish_time — split(":", 1) keeps the HH:MM:SS part
                    # intact (plain split(":")[1] dropped minutes/seconds when
                    # the timestamp itself contained colons)
                    str_to_datetime(
                        a_soup.find_all("div", {"class": "info"})[0]
                        .find_all("span")[0]
                        .text.split(":", 1)[1]
                        .replace("/", "-")
                    ),
                    # original article URL
                    init_url + a_url,
                    # site section (last breadcrumb entry)
                    a_soup.find_all("div", {"class": "location"})[0]
                    .find_all("a")[-1]
                    .text,
                    # title
                    a_soup.find_all("h1")[0].text,
                    # author / source
                    "陕西省住房和城乡建设局",
                    # article body text
                    "".join(body.text for body in bodies),
                    # attachments — newline-joined URLs, stored under media/app03/data/
                    "\n".join(file_path),
                ]
            )
# Crawl window boundaries: begin_time = today 00:00, end_time = yesterday 00:00,
# so the window [end_time, begin_time) covers exactly yesterday's articles.
begin_time, end_time = get_today_and_lastday()
# Request headers for every fetch. NOTE(review): the User-Agent is chosen
# once at import time, not rotated per request.
head = {
    "User-Agent": np.random.choice(
        [
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/13.0.782.215)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.1; SV1; .NET CLR 2.8.52393; WOW64; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A",
            "Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10",
            "Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3",
        ],
    ),
    "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
}
# Site root — prepended to the relative hrefs/srcs found in article pages.
init_url = "http://js.shaanxi.gov.cn"
# Listing sections of the Shaanxi construction-department news site to crawl.
url_list = [
    "http://js.shaanxi.gov.cn/zixun/list2006",
    "http://js.shaanxi.gov.cn/zixun/list2080",
    "http://js.shaanxi.gov.cn/zixun/list2077",
    "http://js.shaanxi.gov.cn/zixun/list2010",
]
def spider():
    """Crawl every configured listing section and return the collected rows.

    :return: list of DB rows accumulated by crawl_li_list for all of url_list
    """
    collected = []
    for base_url in url_list:
        crawl_li_list(get_l_list(base_url, end_time), collected)
    return collected
# 输出结果 (output): spider() returns the rows to insert into the database.