Python pagination crawling: implementing "get the next page" in a Python crawler

Let's first look at the example code:

from time import sleep
import faker
import requests
from lxml import etree

fake = faker.Faker()

base_url = "http://angelimg.spbeen.com"

def get_next_link(url):
    # Parse the current page and extract the "next page" link
    content = downloadHtml(url)
    html = etree.HTML(content)
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        return base_url + next_url[0]
    else:
        return False

def downloadHtml(url):
    # Fetch the page with a random User-Agent and a Referer header
    user_agent = fake.user_agent()
    headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
    response = requests.get(url, headers=headers)
    return response.text

def getImgUrl(content):
    # Extract the image URL and the page title
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    title = html.xpath(".//div[@class='article']/h2/text()")
    return img_url[0], title[0]

def saveImg(title, img_url):
    # Download the image and save it to a file named after the title
    if img_url is not None and title is not None:
        with open("txt/" + str(title) + ".jpg", 'wb') as f:
            user_agent = fake.user_agent()
            headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
            content = requests.get(img_url, headers=headers)
            # request_view(content)
            f.write(content.content)

def request_view(response):
    # Open the fetched response in the local browser for debugging:
    # inject a <base> tag so relative links resolve against the original URL
    import webbrowser
    request_url = response.url
    base_url = '<head><base href="%s">' % (request_url)
    base_url = base_url.encode()
    content = response.content.replace(b"<head>", base_url)
    tem_html = open('tmp.html', 'wb')
    tem_html.write(content)
    tem_html.close()
    webbrowser.open_new_tab('tmp.html')

def crawl_img(url):
    # Download one page, extract the image URL and title, then save the image
    content = downloadHtml(url)
    res = getImgUrl(content)
    title = res[1]
    img_url = res[0]
    saveImg(title, img_url)

if __name__ == "__main__":
    url = "http://angelimg.spbeen.com/ang/4968/1"
    while url:
        print(url)
        crawl_img(url)
        url = get_next_link(url)
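The main loop keeps calling get_next_link until it returns False. Note that the script imports sleep but never uses it; as a minimal sketch, assuming the functions defined above, a politer variant of the loop could throttle requests and cap the page count (the one-second delay and the max_pages limit are illustrative assumptions, not part of the original script):

if __name__ == "__main__":
    url = "http://angelimg.spbeen.com/ang/4968/1"
    max_pages = 50  # assumed safety cap, not in the original script
    visited = 0
    while url and visited < max_pages:
        print(url)
        crawl_img(url)
        sleep(1)  # polite delay between requests; the interval is an assumption
        url = get_next_link(url)
        visited += 1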

How a Python crawler can automatically loop to the next page and load text

from bs4 import BeautifulSoup
import requests
import time
from lxml import etree
import os

# This demo shows how to use BeautifulSoup to crawl some text
def start():
    # Make the HTTP request
    html = requests.get('http://www.baidu.com')
    # Set the encoding
    html.encoding = html.apparent_encoding
    # Build the soup
    soup = BeautifulSoup(html.text, 'html.parser')
    print(type(soup))
    print('Printing the elements')
    print(soup.prettify())
    # Grab the title; this attribute access has no IDE hints, shown directly
    title = soup.head.title.string
    print(title)
    # Write the title to a text file
    with open(r'C:/Users/a/Desktop/a.txt', 'w') as f:
        f.write(title)
    print(time.localtime())

url_2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/sz_4.shtml'

def get_html_from_bs4(url):
    # response = requests.get(url, headers=data, proxies=ip).content.decode('utf-8')
    response = requests.get(url).content.decode('utf-8')
    soup = BeautifulSoup(response, 'html.parser')
    # Pick the 9th link in the pager, which is the "next page" link on this site
    next_page = soup.select('#displaypagenum a:nth-of-type(9)')[0].get('href')
    print(next_page)
    next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page

def get_html_from_etree(url):
    response = requests.get(url).content.decode('utf-8')
    html = etree.HTML(response)
    # The 8th pager link with class "PageNum" is taken as the next-page link
    next_page = html.xpath('.//a[@class="PageNum"][8]/@href')[0]
    print(next_page)
    # next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page

get_html_from_etree(url_2)

if __name__ == '__main__':
    start()
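Note that get_html_from_etree only prints the next-page URL; it does not follow it. A minimal sketch of turning it into a pagination loop, in the same spirit as the first example, could look like this (the crawl_pages helper, the max_pages cap, and the urljoin-based URL resolution are my own assumptions, not part of the original code):

from urllib.parse import urljoin

def crawl_pages(start_url, max_pages=5):
    # Follow the "next page" link extracted with lxml until none is found
    url = start_url
    for _ in range(max_pages):
        print(url)
        response = requests.get(url).content.decode('utf-8')
        html = etree.HTML(response)
        next_page = html.xpath('.//a[@class="PageNum"][8]/@href')
        if not next_page:
            break
        # Resolve a relative href against the current page URL
        url = urljoin(url, next_page[0])

# Example: crawl_pages(url_2)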

That wraps up this article on implementing "get the next page" in a Python crawler. For more on fetching the next page with Python crawlers, please search our earlier articles or keep browsing the related articles below. We hope you will continue to support us!

