Python crawler: following the next page (looping through paginated results)

import os
from time import sleep

import faker
import requests
from lxml import etree

fake = faker.Faker()

base_url = "http://angelimg.spbeen.com"

def get_next_link(url):
    # Parse the current page and pull the relative href of the "next page" anchor;
    # return the absolute URL, or False when there is no next page.
    content = downloadHtml(url)
    html = etree.HTML(content)
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        return base_url + next_url[0]
    else:
        return False
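As a quick illustration of the XPath above: the next-page link is assumed to be an a element with class "ch next" carrying a relative href, which is then joined with base_url. A minimal self-contained sketch (the sample markup is a made-up stand-in, not the real page):

# Sketch: how the next-page XPath resolves on sample markup (markup is hypothetical).
from lxml import etree

sample = '<div><a class="ch next" href="/ang/38">next</a></div>'
doc = etree.HTML(sample)
hrefs = doc.xpath("//a[@class='ch next']/@href")
print(hrefs)                                     # ['/ang/38']
print("http://angelimg.spbeen.com" + hrefs[0])   # absolute URL of the next page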

def downloadHtml(url):
    # Fetch a page with a random User-Agent and a fixed Referer header;
    # return the HTML text on HTTP 200, otherwise None.
    user_agent = fake.user_agent()
    headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
    response = requests.get(url, headers=headers, timeout=20)
    if response.status_code != 200:
        return None
    else:
        return response.text
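sleep is imported at the top of the script but never used; if the site throttles repeated requests, a small pause plus a simple retry around this fetch helps. A minimal sketch building on downloadHtml above, assuming a one-second delay and three attempts (both numbers are arbitrary choices, not from the original):

# Sketch: polite fetching with a pause and retries (parameters are assumptions).
def downloadHtmlWithRetry(url, attempts=3, delay=1.0):
    for _ in range(attempts):
        try:
            text = downloadHtml(url)
            if text is not None:
                return text
        except requests.RequestException as e:
            print("retrying after error: " + str(e))
        sleep(delay)   # wait before the next attempt
    return None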

def getImgUrl(content):
    # Extract the image URL and the page title from one article page.
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    title = html.xpath(".//div[@class='article']/h2/text()")
    return img_url[0], title[0]

def saveImg(title, img_url):
    if img_url is not None and title is not None:
        # Use the part of the title before the first '【' as the directory name.
        title = title.split('【')[0]
        file_path = 'isssss/{}/'.format(title)
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        file_name = img_url.split('/')[-1]
        with open(file_path + file_name + ".jpg", 'wb') as f:
            user_agent = fake.user_agent()
            headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
            content = requests.get(img_url, headers=headers, timeout=20)
            # request_view(content)
            f.write(content.content)
            print("save img " + img_url)

def request_view(response):
    # Debug helper: inject a <base> tag pointing at the original URL, dump the
    # response body to tmp.html, and open it in the default browser.
    import webbrowser
    request_url = response.url
    base_url = '<head><base href="%s">' % (request_url)
    base_url = base_url.encode()
    content = response.content.replace(b"<head>", base_url)
    tem_html = open('tmp.html', 'wb')
    tem_html.write(content)
    tem_html.close()
    webbrowser.open_new_tab('tmp.html')
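request_view is only referenced by the commented-out line in saveImg; it is a debugging aid for checking XPath queries against what the server actually returned. A usage sketch, reusing fake and requests from the script (the gallery id 37 is just an example value):

# Sketch: preview a fetched page in the browser while debugging XPath queries.
resp = requests.get("http://angelimg.spbeen.com/ang/37",
                    headers={'User-Agent': fake.user_agent()},
                    timeout=20)
request_view(resp)   # writes tmp.html and opens it in a new browser tab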

def optimizeContent(res):
    # Clean up a title string, e.g. one that looks like the repr of a bytes
    # object: strip the b'' wrapper, literal \n sequences, quotes, and a few
    # substrings that are awkward in file names.
    res = res.replace('b\'', '')
    res = res.replace('\\n', '')
    res = res.replace('\'', '')
    res = res.replace('style', 'nouse')
    res = res.replace('\\.', '')
    return res
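For example (the input string below is made up to match the patterns the function strips; it is not real page data):

# Sketch: what optimizeContent does to a title string (input is hypothetical).
raw_title = "b'Some Gallery Title\\n'"
print(optimizeContent(raw_title))   # -> Some Gallery Title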

def crawl_img(url):
    # Download one page, extract the image URL and title, and save the image.
    # Returns True on success, None when the page could not be fetched.
    content = downloadHtml(url)
    if content is not None:
        res = getImgUrl(content)
        title = res[1]
        img_url = res[0]
        title = optimizeContent(title)
        title = title.replace('.', '')
        print(title)
        saveImg(title, img_url)
        return True
    else:
        return None

if __name__ == "__main__":
    try:
        root_url = "http://angelimg.spbeen.com/ang/{}"
        # The outer loop walks gallery ids; the inner loop follows "next page"
        # links within one gallery until get_next_link() returns False.
        for i in range(37, 10000):
            url = root_url.format(i)
            try:
                while url:
                    res = crawl_img(url)
                    if res is None:
                        # Page has no content: move on to the next gallery id.
                        print(url + ' has no data')
                        next = i + 1
                        url = root_url.format(next)
                        break
                    else:
                        url = get_next_link(url)
                        if url:
                            print("crawling page: " + url)
                        i = i + 1
            except Exception as e:
                print(str(e))
    except Exception as e:
        print(str(e))
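The same follow-the-next-link pattern can also be written as a small generator that keeps the paging logic in one place. This is a sketch of an alternative structure, not part of the original script:

# Sketch: the pagination loop expressed as a generator (alternative structure).
def iter_pages(start_url):
    url = start_url
    while url:
        yield url
        url = get_next_link(url)   # False ends the iteration

# Usage: crawl every page of gallery 37.
# for page_url in iter_pages("http://angelimg.spbeen.com/ang/37"):
#     crawl_img(page_url)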

Result

(two screenshots of the crawler's output from the original post are not reproduced here)
