高清图 import os import requests from bs4 import BeautifulSoup from access.sprider.SpriderAccess import SpriderAccess from base.BaseConfig import BaseConfig from base.BaseFrame import BaseFrame from business.sprider.DownLoadFile import DownLoadFile from business.sprider.UserAgent import UserAgent from object.entity.SpriderEntity import SpriderEntity from plugin.Tools import Tools class Chidianfan(): page_count = 1 # 每个栏目开始业务 base_url = "http://www.chidianfan.com" # 采集的网址 save_path = BaseConfig().CORPUS_ROOT + os.sep + "Chidian" # 顶级目录 dict_column_list = ["nenmo"] # 遍历的栏目 def __init__(self): Tools.judge_diskpath_exits_create(self.save_path) pass def sprider(self): BaseFrame.__log__("开始采集ChidianFan网站图片...") for url in self.dict_column_list: number = SpriderAccess().find_pager_by_url_and_type(self.base_url, url) if number > 1: self.page_count = number BaseFrame.__log__("本次采集从分页" + str(number) + "开始。") next_url = self.base_url + "/" + url + "/" try: response = requests.get(next_url, timeout=10, headers=UserAgent().get_random_header(next_url)) response.encoding = 'UTF-8' soup = BeautifulSoup(response.text, "html5lib") except Exception as e: BaseFrame.__err__("请求一级栏目出现错误,倒霉...。" + str(e)) continue laster_pager_url = soup.find('a', text='末页').get("href") page_end_number = int(laster_pager_url.split(".")[0].split("_")[2]) while self.page_count <= int(page_end_number): # 翻完停止 if self.page_count == 1: div_pic_list = soup.find_all('div', attrs={"class": 'pic'}) for childer in div_pic_list: next_url = self.base_url + childer.find('a', attrs={"target": '_blank'}).get("href") title = childer.find('a', attrs={"target": '_blank'}).get("title") self.sprider_detail(next_url, title, url, self.page_count) # 请请明细页面 else: next_page_url = self.base_url + "/" + url + "/list_1_{0}.html".format(self.page_count) try: response = requests.get(next_page_url, timeout=10, headers=UserAgent().get_random_header(next_page_url)) response.encoding = 'UTF-8' soup = BeautifulSoup(response.text, "html5lib") 
div_pic_list = soup.find_all('div', attrs={"class": 'pic'}) for childer in div_pic_list: next_url = self.base_url + childer.find('a', attrs={"target": '_blank'}).get("href") title = childer.find('a', attrs={"target": '_blank'}).get("title") self.sprider_detail(next_url, title, url, self.page_count) # 请请明细页面 except Exception as e: BaseFrame.__log__("请求一级栏目出现错误,倒霉...。" + str(e)) continue self.page_count = self.page_count + 1 def sprider_detail(self, url, title, second_path, pager): """ :param url: :param second_path: :param pager: :return: """ BaseFrame.__log__("正在采集" + title + "的图片...网址:" + url) threestring= url.split(".")[int(len(url.split("."))) - 2] threestring=threestring[0:len(threestring)-1] threestring=threestring+"{0}" # http://www.chidianfan.com/nenmo/list_1_1.html # http://www.chidianfan.com/nenmo/list_1_21.html #next_pager = (url.split(".")[int(len(url.split("."))) - 2].replace("-1", "-{0}")) next_pager = (url.split(".")[0] + "." + url.split(".")[1] + "." + threestring + ".html") try: response = requests.get(url, timeout=10, headers=UserAgent().get_random_header(url)) response.encoding = 'utf-8' soup = BeautifulSoup(response.text, "html5lib") pager_div = soup.find('div', attrs={"class": 'article-paper'}) page_end_number = int(pager_div.find('li').find('a').string.replace('共', '').replace('页:', '')) except Exception as e: BaseFrame.__err__("解析URL和查询分页的时候出现错误:" + str(e)) return for pager_index in range(page_end_number): try: next_pager_url = next_pager.format(pager_index + 1) print(next_pager_url) response = requests.get(next_pager_url, timeout=10, headers=UserAgent().get_random_header(next_pager_url)) response.encoding = 'utf-8' soup = BeautifulSoup(response.text, "html5lib") image_div = soup.find('div', attrs={"class": 'article-pic'}) if image_div is None: BaseFrame.__err__("很抱歉,页面没有找到" + next_pager_url) # print(soup.title) # 很抱歉,页面没有找到,秒钟之后将带你进入首页出错啦 continue page_pic_index = 1 for a in image_div: title = a.find('img').get("alt") url = 
a.find('img').get("src").strip() print(url) mmEntity = SpriderEntity() # 依据图片执行,下载过的图片不再下载 mmEntity.sprider_base_url = self.base_url mmEntity.create_datetime = Tools.get_current_datetime() mmEntity.sprider_url = url mmEntity.sprider_pic_title = title mmEntity.sprider_pic_index = str(pager_index * page_pic_index) mmEntity.sprider_pager_index = pager mmEntity.sprider_type = second_path if SpriderAccess().query_sprider_entity_by_urlandtitle(url, title) is None: SpriderAccess().save_sprider(mmEntity) DownLoadFile(self.save_path).__down_load_image__(url, title, second_path, pager_index * page_pic_index) else: BaseFrame.__log__(url + "数据采集过因此跳过") page_pic_index = page_pic_index + 1 except Exception as e: BaseFrame.__err__("采集图片出现错误,跳过本页图片。错误内容如下:" + str(e)) continue if __name__ == '__main__': Chidianfan().sprider()
import os
import requests
from bs4 import BeautifulSoup
from access.sprider.SpriderAccess import SpriderAccess
from base.BaseConfig import BaseConfig
from base.BaseFrame import BaseFrame
from business.sprider.DownLoadFile import DownLoadFile
from business.sprider.UserAgent import UserAgent
from object.entity.SpriderEntity import SpriderEntity
from plugin.Tools import Tools
class Chidianfan():
    """Crawler for image galleries on chidianfan.com.

    Walks each column slug in ``dict_column_list``, pages through the
    column's list pages, and for every gallery found crawls its detail
    pages and downloads each image that has not been collected before
    (deduplicated through ``SpriderAccess``).
    """

    page_count = 1  # current list-page number for the column being crawled
    base_url = "http://www.chidianfan.com"  # site root being crawled
    save_path = BaseConfig().CORPUS_ROOT + os.sep + "Chidian"  # top-level download directory
    dict_column_list = ["nenmo"]  # column slugs to crawl

    def __init__(self):
        # Ensure the download directory exists before any crawling starts.
        Tools.judge_diskpath_exits_create(self.save_path)

    def sprider(self):
        """Crawl every configured column, resuming from the last recorded page."""
        BaseFrame.__log__("开始采集ChidianFan网站图片...")
        for column in self.dict_column_list:
            # Reset the counter for each column (the original never reset it,
            # so a second column would have started at the first column's last
            # page), then resume from the page recorded for this column.
            self.page_count = 1
            number = SpriderAccess().find_pager_by_url_and_type(self.base_url, column)
            if number > 1:
                self.page_count = number
                BaseFrame.__log__("本次采集从分页" + str(number) + "开始。")
            column_url = self.base_url + "/" + column + "/"
            try:
                response = requests.get(column_url, timeout=10,
                                        headers=UserAgent().get_random_header(column_url))
                response.encoding = 'UTF-8'
                soup = BeautifulSoup(response.text, "html5lib")
            except Exception as e:
                BaseFrame.__err__("请求一级栏目出现错误,倒霉...。" + str(e))
                continue
            try:
                # The "末页" (last page) anchor encodes the final page number,
                # e.g. list_1_21.html -> 21.
                laster_pager_url = soup.find('a', text='末页').get("href")
                page_end_number = int(laster_pager_url.split(".")[0].split("_")[2])
            except Exception as e:
                BaseFrame.__err__("请求一级栏目出现错误,倒霉...。" + str(e))
                continue
            while self.page_count <= page_end_number:  # stop once every page is visited
                if self.page_count == 1:
                    # Page 1 is the column index page already fetched above.
                    self._process_list_page(soup, column)
                else:
                    next_page_url = self.base_url + "/" + column + \
                        "/list_1_{0}.html".format(self.page_count)
                    try:
                        response = requests.get(next_page_url, timeout=10,
                                                headers=UserAgent().get_random_header(next_page_url))
                        response.encoding = 'UTF-8'
                        soup = BeautifulSoup(response.text, "html5lib")
                        self._process_list_page(soup, column)
                    except Exception as e:
                        # BUG FIX: the original `continue`d here WITHOUT
                        # advancing page_count, spinning forever on a page
                        # that keeps failing. Fall through to the increment.
                        BaseFrame.__err__("请求一级栏目出现错误,倒霉...。" + str(e))
                # Always advance so the loop is guaranteed to terminate.
                self.page_count = self.page_count + 1

    def _process_list_page(self, soup, column):
        """Extract every gallery link from one list page and crawl its detail pages."""
        for child in soup.find_all('div', attrs={"class": 'pic'}):
            anchor = child.find('a', attrs={"target": '_blank'})
            if anchor is None:
                continue  # robustness: skip a pic div without the expected anchor
            detail_url = self.base_url + anchor.get("href")
            title = anchor.get("title")
            self.sprider_detail(detail_url, title, column, self.page_count)  # crawl detail pages

    def sprider_detail(self, url, title, second_path, pager):
        """Crawl one gallery's detail pages and download its images.

        :param url: absolute URL of the gallery's first detail page
        :param title: gallery title (used for dedup and file naming)
        :param second_path: column slug, used as the sub-directory name
        :param pager: list-page number the gallery was found on
        :return: None
        """
        BaseFrame.__log__("正在采集" + title + "的图片...网址:" + url)
        # Build a detail-page URL template by dropping the trailing digit of
        # the path stem and substituting the page number, e.g.
        # .../12341.html -> .../1234{0}.html  (same arithmetic as the original).
        parts = url.split(".")
        stem = parts[-2][:-1] + "{0}"
        next_pager = parts[0] + "." + parts[1] + "." + stem + ".html"
        try:
            response = requests.get(url, timeout=10, headers=UserAgent().get_random_header(url))
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, "html5lib")
            pager_div = soup.find('div', attrs={"class": 'article-paper'})
            # Pager text looks like "共N页:"; strip the Chinese characters to get N.
            page_end_number = int(pager_div.find('li').find('a').string
                                  .replace('共', '').replace('页:', ''))
        except Exception as e:
            BaseFrame.__err__("解析URL和查询分页的时候出现错误:" + str(e))
            return
        for pager_index in range(page_end_number):
            try:
                next_pager_url = next_pager.format(pager_index + 1)
                response = requests.get(next_pager_url, timeout=10,
                                        headers=UserAgent().get_random_header(next_pager_url))
                response.encoding = 'utf-8'
                soup = BeautifulSoup(response.text, "html5lib")
                image_div = soup.find('div', attrs={"class": 'article-pic'})
                if image_div is None:
                    BaseFrame.__err__("很抱歉,页面没有找到" + next_pager_url)
                    continue
                page_pic_index = 1
                # BUG FIX: iterate the <img> tags directly. The original looped
                # over image_div's raw children (including text nodes); one
                # non-tag child raised and the surrounding try aborted the
                # whole page. It also reassigned the `url`/`title` parameters,
                # clobbering them after the first image.
                for img in image_div.find_all('img'):
                    pic_title = img.get("alt")
                    pic_url = img.get("src").strip()
                    mmEntity = SpriderEntity()  # record so downloaded images are not re-downloaded
                    mmEntity.sprider_base_url = self.base_url
                    mmEntity.create_datetime = Tools.get_current_datetime()
                    mmEntity.sprider_url = pic_url
                    mmEntity.sprider_pic_title = pic_title
                    # NOTE(review): pager_index * page_pic_index is 0 for every
                    # image on the first detail page — looks suspicious, but
                    # preserved as-is; confirm intended index scheme.
                    mmEntity.sprider_pic_index = str(pager_index * page_pic_index)
                    mmEntity.sprider_pager_index = pager
                    mmEntity.sprider_type = second_path
                    if SpriderAccess().query_sprider_entity_by_urlandtitle(pic_url, pic_title) is None:
                        SpriderAccess().save_sprider(mmEntity)
                        DownLoadFile(self.save_path).__down_load_image__(
                            pic_url, pic_title, second_path, pager_index * page_pic_index)
                    else:
                        BaseFrame.__log__(pic_url + "数据采集过因此跳过")
                    page_pic_index = page_pic_index + 1
            except Exception as e:
                BaseFrame.__err__("采集图片出现错误,跳过本页图片。错误内容如下:" + str(e))
                continue
if __name__ == '__main__':
    # Script entry point: instantiate the crawler and run it once.
    spider = Chidianfan()
    spider.sprider()
# 采集迟点翻MM图片 (crawl Chidianfan MM gallery images) — stray trailing caption from the page scrape, turned into a comment so the module parses.