话不多说,先上代码:
import urllib.parse
import urllib.request
from lxml import etree
import time
import os
def handle_request(url, page):
    """Build a urllib Request for the given page of a paginated listing.

    Args:
        url: URL template containing a ``{}`` placeholder for the page suffix.
        page: 1-based page number.

    Returns:
        urllib.request.Request with a browser-like User-Agent header.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
    }
    # The first page has no numeric suffix; later pages use "_<page>",
    # so the two cases must be formatted differently.
    if page == 1:
        url = url.format('')
    else:
        url = url.format('_' + str(page))
    request = urllib.request.Request(url=url, headers=headers)
    return request
def download_image(image_src, i):
    """Download one image to ``gudian/<i>.png``.

    Args:
        image_src: absolute URL of the image.
        i: 1-based index used as the file name.

    NOTE(review): the file is always saved with a ``.png`` extension even if
    the source is a JPEG/GIF — confirm whether the extension should be taken
    from ``image_src`` instead.
    """
    dirpath = 'gudian'
    print('正在下载第' + str(i) + '张图片')
    # Create the target directory on first use.
    if not os.path.exists(dirpath):
        os.mkdir(dirpath)
    # Build the destination path (basename() on a bare "<i>.png" was a no-op).
    filename = str(i) + '.png'
    filepath = os.path.join(dirpath, filename)
    print(filepath)
    # Send the request with a browser-like UA and save the raw bytes.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
    }
    request = urllib.request.Request(url=image_src, headers=headers)
    # Use a context manager so the HTTP response is always closed
    # (the original leaked the connection).
    with urllib.request.urlopen(request) as response:
        with open(filepath, 'wb') as fp:
            fp.write(response.read())
def parse_content(content):
    """Extract lazily-loaded image URLs from an HTML page and download each.

    Args:
        content: decoded HTML text of one listing page.
    """
    # Parse the HTML into an element tree.
    tree = etree.HTML(content)
    # The page lazy-loads images, so the real URL lives in @data-src,
    # not @src.
    image_list = tree.xpath('//div[@id="f-content"]/div/div/a/img/@data-src')
    print(image_list)
    # Download each image, numbering from 1.  (The original incremented the
    # counter *before* the first download, so images were numbered 2..n+1.)
    for i, image_src in enumerate(image_list, 1):
        download_image(image_src, i)
def main():
    """Prompt for a page range and scrape the images on each page."""
    # NOTE(review): this URL contains no '{}' placeholder, so the
    # url.format(...) calls in handle_request are no-ops and every page
    # fetches the same document.  The commented chinaz URL below shows the
    # intended "..._<page>.html" pattern — confirm the correct paginated
    # URL template for this site and add a '{}' where the suffix belongs.
    url = 'http://www.51yuansu.com/search/canyinkatong.html'
    # http://sc.chinaz.com/tupian/gudianmeinvtupian_2.html
    start_page = int(input("请输入开始页码:"))
    end_page = int(input("请输入结束页码:"))
    for page in range(start_page, end_page + 1):
        print("第%s页开始下载······" % page)
        request = handle_request(url, page)
        content = urllib.request.urlopen(request).read().decode()
        # Parse the page and download its images.
        parse_content(content)
        # Be polite to the server between pages.
        time.sleep(2)
        print("第%s页下载完毕······" % page)
    print("下载完成!")
# Script entry point: only run the scraper when executed directly.
if __name__ == "__main__":
    main()
运行结果如图:
我们来看一下网站的目录结构
然后查看网页源代码,发现这些懒加载图片的真实链接地址存放在 data-src 属性当中,于是写出对应代码:
image_list = tree.xpath('//div[@id="f-content"]/div/div/a/img/@data-src')
这样,一个简单的爬虫就实现了。各位想要爬取自己感兴趣网页的图片的话,理清页面的目录结构,根据情况修改一下代码就可以了。