1、从第二页开始,访问的url地址发生变化,为.../index_x.html
通过F12找到每个图片进入详情页的url
# Build the listing-page URL; pages >= 2 follow the pattern index_<n>.html
url = "https://pic.netbian.com/index_{}.html".format(page_num)
# Send a desktop-browser User-Agent so the site serves the normal page
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
res = requests.get(url=url, headers=headers)
# The site is GBK-encoded; set it explicitly so Chinese titles decode correctly
res.encoding = 'gbk'
2、创建etree对象,通过xpath找到进入详情页的url,即找一个如:
https://pic.netbian.com/tupian/33187.html
# Parse the listing-page HTML into an lxml element tree
tree = etree.HTML(res.text)
# print(url)
# Collect the relative detail-page links (e.g. /tupian/33187.html)
li_list = tree.xpath('//div[@class="slist"]/ul[@class="clearfix"]/li/a/@href')
3、通过循环把li_list内容遍历出来,并找到对应具体图片的链接和名字
即找到:
https://pic.netbian.com/uploads/allimg/240118/165528-1705568128cf50.jpg
财神爷指谁谁发财5120x1440双屏壁纸
找到后将图片写入文件
for li in li_list:
    base_url = 'https://pic.netbian.com'
    # Join into the full detail-page URL, e.g. https://pic.netbian.com/tupian/33187.html
    href = base_url + li
    # print(href)
    # Fetch the detail page
    res1 = requests.get(href, headers=headers)
    res1.encoding = 'gbk'
    # print(res1.text)
    tree_detail = etree.HTML(res1.text)
    # Extract the full-size image URL (relative path)
    src_detail = tree_detail.xpath('//a[@id="img"]/img/@src')
    # Extract the image title (used as the file name)
    img_title = tree_detail.xpath('//a[@id="img"]/img/@alt')
    # xpath() returns lists; take the first element of each
    img_title = img_title[0]
    src_detail = src_detail[0]
    # Join into the absolute image URL
    detail_href = base_url + src_detail
    print(detail_href, img_title)
    # NOTE(review): "\{}" is an invalid escape sequence used as a literal
    # backslash separator -- Windows-only; prefer os.path.join
    with open(path + "\{}.jpg".format(img_title), mode='wb') as d_img:
        img_res = requests.get(detail_href)
        d_img.write(img_res.content)
4、把抓取页数量设置为可输入,并将该变量传到多页抓取的方法
# Number of listing pages to scrape, read from the user
page_num = int(input('请输入想抓取多少页的数据:'))
if __name__ == '__main__':
    # Exactly one page: scrape the site root
    if page_num == 1:
        single_page_detail()
    # More than one page: descending multi-page scrape
    elif page_num > 1:
        multi_page_detail(page_num)
5、多页抓取方法
def multi_page_detail(page_num):
    """Scrape listing pages page_num down to 2, then page 1.

    For each listing page, follow every detail-page link, extract the
    full-size image URL and its title, and save the image under `path`
    as <title>.jpg.

    :param page_num: total number of pages to scrape (>= 2 for this path;
                     page 1 is handled by single_page_detail() at the end)
    """
    import os  # local import: this snippet's surrounding imports are not shown

    base_url = 'https://pic.netbian.com'
    # Loop-invariant headers hoisted out of the loop
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    # Pages >= 2 use .../index_<n>.html; don't shadow the parameter
    for n in range(page_num, 1, -1):
        url = "https://pic.netbian.com/index_{}.html".format(n)
        res = requests.get(url=url, headers=headers, timeout=10)
        res.encoding = 'gbk'  # site is GBK-encoded
        tree = etree.HTML(res.text)
        # Relative links to each picture's detail page
        li_list = tree.xpath('//div[@class="slist"]/ul[@class="clearfix"]/li/a/@href')
        for li in li_list:
            # e.g. https://pic.netbian.com/tupian/33187.html
            href = base_url + li
            res1 = requests.get(href, headers=headers, timeout=10)
            res1.encoding = 'gbk'
            tree_detail = etree.HTML(res1.text)
            src_detail = tree_detail.xpath('//a[@id="img"]/img/@src')
            img_title = tree_detail.xpath('//a[@id="img"]/img/@alt')
            # Skip detail pages that lack the expected markup instead of crashing
            if not src_detail or not img_title:
                continue
            detail_href = base_url + src_detail[0]
            title = img_title[0]
            print(detail_href, title)
            # os.path.join is portable; the original path + "\{}.jpg" relied on
            # an invalid "\{" escape sequence and only worked on Windows
            with open(os.path.join(path, "{}.jpg".format(title)), mode='wb') as d_img:
                img_res = requests.get(detail_href, headers=headers, timeout=10)
                d_img.write(img_res.content)
    # Page 1 has no index_1.html; it is the bare site root
    single_page_detail()
6、单页抓取方法
def single_page_detail():
    """Scrape page 1 (the site root) of the wallpaper listing and save
    every full-size image under `path` as <title>.jpg."""
    import os  # local import: this snippet's surrounding imports are not shown

    url = 'https://pic.netbian.com'
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    res = requests.get(url=url, headers=headers, timeout=10)
    res.encoding = 'gbk'  # site is GBK-encoded
    tree = etree.HTML(res.text)
    # Relative links to each picture's detail page
    li_list = tree.xpath('//div[@class="slist"]/ul[@class="clearfix"]/li/a/@href')
    for rel_href in li_list:
        detail_url = url + rel_href
        res1 = requests.get(detail_url, headers=headers, timeout=10)
        res1.encoding = 'gbk'
        tree_img = etree.HTML(res1.text)
        img_src = tree_img.xpath('//div[@class="photo-pic"]//img/@src')
        titles = tree_img.xpath('//div[@class="photo-pic"]//img/@alt')
        for i, img in enumerate(img_src):
            detail_href = url + img
            # Pair each src with its own alt. The original did
            # img_title = img_title[0] inside this loop, which on a second
            # iteration indexed a str and took its first character.
            img_title = titles[i] if i < len(titles) else 'untitled_{}'.format(i)
            print(detail_href, img_title)
            # Portable path join; the original "\{}" separator was Windows-only
            with open(os.path.join(path, "{}.jpg".format(img_title)), mode='wb') as d_img:
                img_res = requests.get(detail_href, headers=headers, timeout=10)
                d_img.write(img_res.content)
完整代码
import os
import requests
from lxml import etree

# Number of listing pages to scrape; read once at startup
page_num = int(input('请输入想抓取多少页的数据:'))
# Directory where downloaded images are saved
path = '4k高清多页详情页图片'
# makedirs(exist_ok=True) avoids the check-then-create race of
# os.path.exists() followed by os.mkdir()
os.makedirs(path, exist_ok=True)
def single_page_detail():
    """Scrape page 1 (the site root) of the wallpaper listing and save
    every full-size image under `path` as <title>.jpg."""
    url = 'https://pic.netbian.com'
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    res = requests.get(url=url, headers=headers, timeout=10)
    res.encoding = 'gbk'  # site is GBK-encoded
    tree = etree.HTML(res.text)
    # Relative links to each picture's detail page
    li_list = tree.xpath('//div[@class="slist"]/ul[@class="clearfix"]/li/a/@href')
    for rel_href in li_list:
        detail_url = url + rel_href
        res1 = requests.get(detail_url, headers=headers, timeout=10)
        res1.encoding = 'gbk'
        tree_img = etree.HTML(res1.text)
        img_src = tree_img.xpath('//div[@class="photo-pic"]//img/@src')
        titles = tree_img.xpath('//div[@class="photo-pic"]//img/@alt')
        for i, img in enumerate(img_src):
            detail_href = url + img
            # Pair each src with its own alt. The original did
            # img_title = img_title[0] inside this loop, which on a second
            # iteration indexed a str and took its first character.
            img_title = titles[i] if i < len(titles) else 'untitled_{}'.format(i)
            print(detail_href, img_title)
            # Portable path join; the original "\{}" separator was Windows-only
            with open(os.path.join(path, "{}.jpg".format(img_title)), mode='wb') as d_img:
                img_res = requests.get(detail_href, headers=headers, timeout=10)
                d_img.write(img_res.content)
def multi_page_detail(page_num):
    """Scrape listing pages page_num down to 2, then page 1.

    For each listing page, follow every detail-page link, extract the
    full-size image URL and its title, and save the image under `path`
    as <title>.jpg.

    :param page_num: total number of pages to scrape (>= 2 for this path;
                     page 1 is handled by single_page_detail() at the end)
    """
    base_url = 'https://pic.netbian.com'
    # Loop-invariant headers hoisted out of the loop
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    # Pages >= 2 use .../index_<n>.html; don't shadow the parameter
    for n in range(page_num, 1, -1):
        url = "https://pic.netbian.com/index_{}.html".format(n)
        res = requests.get(url=url, headers=headers, timeout=10)
        res.encoding = 'gbk'  # site is GBK-encoded
        tree = etree.HTML(res.text)
        # Relative links to each picture's detail page
        li_list = tree.xpath('//div[@class="slist"]/ul[@class="clearfix"]/li/a/@href')
        for li in li_list:
            # e.g. https://pic.netbian.com/tupian/33187.html
            href = base_url + li
            res1 = requests.get(href, headers=headers, timeout=10)
            res1.encoding = 'gbk'
            tree_detail = etree.HTML(res1.text)
            src_detail = tree_detail.xpath('//a[@id="img"]/img/@src')
            img_title = tree_detail.xpath('//a[@id="img"]/img/@alt')
            # Skip detail pages that lack the expected markup instead of crashing
            if not src_detail or not img_title:
                continue
            detail_href = base_url + src_detail[0]
            title = img_title[0]
            print(detail_href, title)
            # os.path.join is portable; the original path + "\{}.jpg" relied on
            # an invalid "\{" escape sequence and only worked on Windows
            with open(os.path.join(path, "{}.jpg".format(title)), mode='wb') as d_img:
                img_res = requests.get(detail_href, headers=headers, timeout=10)
                d_img.write(img_res.content)
    # Page 1 has no index_1.html; it is the bare site root
    single_page_detail()
if __name__ == '__main__':
    if page_num == 1:
        # One page only: scrape the site root
        single_page_detail()
    elif page_num > 1:
        # Scrape pages page_num..2, then page 1
        multi_page_detail(page_num)
    else:
        # Original silently did nothing for 0 / negative input
        print('页数必须是正整数')