import json
import os
import requests
from bs4 import BeautifulSoup
from access.sprider.SpriderAccess import SpriderAccess
from base.BaseConfig import BaseConfig
from base.BaseFrame import BaseFrame
from business.sprider.UserAgent import UserAgent
from object.entity.SpriderEntity import SpriderEntity
from plugin.Tools import Tools
class TooOpenMao:
    """Scraper that walks the tooopen.com image listings column by column,
    page by page, and downloads every image not yet recorded in the local DB."""

    page_count = 1  # current pagination index for the column being scraped
    base_url = "https://www.tooopen.com/img/"  # site root for image listings
    save_path = BaseConfig().CORPUS_ROOT + os.sep + "TooOpen"  # local download root
    # column key -> listing URL template; {0} is substituted with the page number
    dict_column_list = {'maomao': '89_869_1_{0}.aspx'}

    def __init__(self):
        # Ensure the download root exists before any scraping starts.
        Tools.judge_diskpath_exits_create(self.save_path)

    def _fetch_soup(self, url):
        """GET *url* and return the parsed BeautifulSoup document.

        Retries once on failure (the original retry was unprotected and could
        crash the whole run). Returns None when both attempts fail.
        """
        for _attempt in range(2):
            try:
                response = requests.get(url)
                response.encoding = 'utf-8'
                return BeautifulSoup(response.text, "html5lib")
            except Exception as e:
                BaseFrame.__err__("采集图片出现错误" + str(e) + "重新尝试中...")
        return None

    def _collect_urls(self, soup):
        """Extract detail-page URLs from one listing page.

        The page carries results in two places: a JSON blob embedded in
        <div id="data-more"> and regular <a class="pic"> anchors.
        """
        urls = []
        json_str = soup.find('div', attrs={"id": 'data-more'}).string
        for entity in json.loads(json_str):
            urls.append(entity["url"])
        for a in soup.find_all('a', attrs={"class": 'pic'}):
            urls.append(a.get("href"))
        return urls

    def _read_last_page_number(self, soup):
        """Return the highest page number shown in the pager widget.

        The last-page link sits just before the trailing "next" anchor,
        hence the len-2 index.
        """
        pager_list = soup.find('div', attrs={"class": 'page-nav'})
        a_list = pager_list.find_all('a')
        return int(a_list[len(a_list) - 2].string)

    def sprider(self):
        """Scrape every configured column, resuming from the page recorded
        by a previous (possibly interrupted) run."""
        BaseFrame.__log__("开始采集TooOpen网站图片...")
        for column in self.dict_column_list:
            # Resume point persisted by an earlier run, if any.
            number = SpriderAccess().find_pager_by_url_and_type(self.base_url, column)
            if number > 1:
                self.page_count = number
                BaseFrame.__log__("本次采集从分页" + str(number) + "开始。")
            next_url = self.base_url + self.dict_column_list[column].format(self.page_count)
            soup = self._fetch_soup(next_url)
            if soup is None:
                continue  # both attempts failed; skip this column
            urls = self._collect_urls(soup)
            self.page_end_number = self._read_last_page_number(soup)
            while self.page_count <= int(self.page_end_number):
                if self.page_count == 1:
                    for url in urls:
                        self.sprider_image(url, column, 1)
                else:
                    next_url = self.base_url + self.dict_column_list[column].format(self.page_count)
                    soup = self._fetch_soup(next_url)
                    if soup is None:
                        # Advance past the bad page; the original `continue`
                        # without incrementing looped forever on a dead page.
                        self.page_count = self.page_count + 1
                        continue
                    # Fresh list per page — accumulating across pages made
                    # every iteration re-process all previously seen URLs.
                    urls = self._collect_urls(soup)
                    for url in urls:
                        self.sprider_image(url, column, self.page_count)
                    if self.page_count == self.page_end_number:
                        # Reached the last known page: the pager may now expose
                        # a larger maximum, so re-read it and keep going.
                        self.page_end_number = self._read_last_page_number(soup)
                        BaseFrame.__log__("......开启新篇章......")
                self.page_count = self.page_count + 1
        BaseFrame.__log__("采集完毕...")

    def sprider_image(self, url, second_path, pager):
        """Open one detail page, record the image in the DB and download it
        unless an identical (url, title) pair was already stored."""
        if url.find("https://www.tooopen.com") >= 0:
            BaseFrame.__log__("开始采集" + url + "图片...")
            try:
                response = requests.get(url, timeout=10, headers=UserAgent().get_random_header(url))
                response.encoding = 'utf-8'
                soup = BeautifulSoup(response.text, "html5lib")
            except Exception as e:
                BaseFrame.__err__("采集图片出现错误" + str(e))
                return
            image = soup.find('img', attrs={"id": 'imgView'})
            image_url = image.get("src")
            image_title = image.get("title")
            # Persist a record keyed by the image itself so that images
            # downloaded once are skipped on later runs.
            mmEntity = SpriderEntity()
            mmEntity.sprider_base_url = self.base_url
            mmEntity.create_datetime = Tools.get_current_datetime()
            mmEntity.sprider_url = image_url
            mmEntity.sprider_pic_title = image_title
            mmEntity.sprider_pic_index = str(1)
            mmEntity.sprider_pager_index = pager
            mmEntity.sprider_type = second_path
            if SpriderAccess().query_sprider_entity_by_urlandtitle(image_url, image_title) is None:
                SpriderAccess().save_sprider(mmEntity)
                self.dowload_image(image_url, image_title, second_path)
            else:
                BaseFrame.__log__(image_url + "数据采集过因此跳过")

    def dowload_image(self, pic_url, pic_title, second_path):
        """Fetch *pic_url* and save it to <save_path>/<second_path>/<pic_title>.jpg.

        Best-effort: any error is logged and swallowed so one bad image
        cannot abort the whole crawl.
        """
        try:
            headers = {"Referer": pic_url,
                       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                                     '(KHTML, like Gecko)Chrome/62.0.3202.94 Safari/537.36'}
            img_content = requests.get(pic_url, headers=headers)
            real_path = self.save_path + os.sep + second_path + os.sep
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(real_path, exist_ok=True)
            if img_content.status_code == 200:
                # Context manager guarantees the handle closes even on a
                # write error (original leaked the handle in that case).
                with open(real_path + pic_title + '.jpg', 'wb') as fp:
                    fp.write(img_content.content)
        except Exception as e:
            BaseFrame.__log__("下载图片过程出现错误" + str(e))
            return
# Script entry point: run a full scrape when executed directly.
if __name__ == "__main__":
    TooOpenMao().sprider()