First, open the site http://www.gengzhongbang.com/haichongtupu/banchimuhaichong/ in a browser.
The URL is just the page address; for the request headers, see Figure 1.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.7 Safari/537.36'}
# The URL is the page address; see Figure 1 for the headers
url = 'http://www.gengzhongbang.com/haichongtupu/banchimuhaichong/'
The requests module's get() method fetches a page's source. We process each image one by one, then save it in 'wb' (binary write) mode under its own name.
We send a request to the site, parse the page, and load the source fetched from the internet into an etree object.
def get_img(img_url, name, path):
    # Process a single image (note: headers must be passed as a keyword argument,
    # otherwise requests.get treats the dict as query parameters)
    response = requests.get(img_url, headers=headers)
    content = response.content
    # Save the image in 'wb' (binary write) mode under the given name
    with open(path + '/{}.jpg'.format(name), 'wb') as f:
        f.write(content)
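get_img as written ignores HTTP error codes and buffers the whole file in memory. If you want a sturdier version, here is a sketch of a hypothetical get_img_checked using only standard requests features (raise_for_status and streaming):

def get_img_checked(img_url, name, path):
    # Stream the download so large images are not held in memory all at once
    response = requests.get(img_url, headers=headers, stream=True)
    # Raise an exception on 4xx/5xx instead of silently saving an error page
    response.raise_for_status()
    with open(path + '/{}.jpg'.format(name), 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)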
def get_url_tree(url):
    # Send a request to the site
    page_text = requests.get(url=url, headers=headers).text
    # Load the source fetched from the internet into an etree object
    tree = etree.HTML(page_text)
    return tree
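To make the [0], text(), and @href steps used later concrete, here is a tiny self-contained demonstration of how etree.HTML and xpath() behave (the HTML snippet is made up for illustration, not taken from the site):

from lxml import etree

demo = etree.HTML('<div class="list"><a href="/pest-1.html">Pest one</a></div>')
print(demo.xpath('//div[@class="list"]/a/text()'))    # ['Pest one']; xpath() always returns a list
print(demo.xpath('//div[@class="list"]/a/@href')[0])  # '/pest-1.html'; [0] takes the first match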
Press F12 to inspect the elements; a pattern emerges in each pest's link.
Copy the XPath of one entry: the [1] in the final brackets selects the first pest. Dropping that index makes the expression match every entry, so all the pest links end up in the fu_list list.
for j in range(1, 22):
    all_url = "http://www.gengzhongbang.com/haichongtupu/linchimuhaichong/list-{}".format(j)
    # //*[@id="ct"]/div[1]/div[2]/div[2]/dl[1]
    tree = get_url_tree(all_url)
    fu_list = tree.xpath('//*[@id="ct"]/div[1]/div[2]/div[2]/dl')
    print(fu_list)
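The loop above hard-codes 21 listing pages. If the page count is not known in advance, one alternative (just a sketch, assuming a past-the-end page simply returns no dl entries) is to keep going until a page comes back empty:

j = 0
while True:
    j += 1
    all_url = "http://www.gengzhongbang.com/haichongtupu/linchimuhaichong/list-{}".format(j)
    tree = get_url_tree(all_url)
    fu_list = tree.xpath('//*[@id="ct"]/div[1]/div[2]/div[2]/dl')
    if not fu_list:
        break  # no entries on this page: assume we ran past the last listing page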
Find the first pest's link and copy its XPath. The page address is the value of the href attribute, so @href extracts it directly. The [0] takes the first element of the list that xpath() returns, and text() gets the element's text.
for li in fu_list:
    # print(li)
    # Here ./ continues from the li node above, i.e. it is shorthand for li/div...
    # //*[@id="ct"]/div[1]/div[2]/div[2]/dl[1]/dt/h2/a
    title = li.xpath('./dt/h2/a/text()')[0]
    new_url = li.xpath('./dt/h2/a/@href')[0]
    fu_name = li.xpath('//*[@id="ct"]/div[1]/div[2]/div[1]/h1/text()')[0]
    print(title)
    print(new_url)
    print(fu_name)
Open the first pest's detail page and press F12 to inspect its images; the image addresses are visible. As before, comparing two images' XPaths reveals how they relate: only the index after center (the [1]) differs, so a loop can fetch every photo.
The scraped address is incomplete, so http://www.gengzhongbang.com/ has to be prepended to form the full image URL.
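The snippet below saves into all_img_path. In the complete code at the end this path is built from a root directory plus the pest name fu_name; for the walkthrough, assume it was prepared the same way (os is imported in the complete code):

root_path = r'E:\pythonProject\py\爬虫\害虫爬取'
fu_path = os.path.join(root_path, fu_name)
all_img_path = fu_path + "/img"
if not os.path.exists(fu_path):
    os.mkdir(fu_path)
if not os.path.exists(all_img_path):
    os.mkdir(all_img_path)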
zi_tree = get_url_tree(new_url)
# //*[@id="article_content"]/center[1]
zi_list = zi_tree.xpath('//*[@id="article_content"]/center')
i = 0
for zizi in zi_list:
    try:
        zizi_img_url = zizi.xpath('./a/@href')[0]
        he_url = 'http://www.gengzhongbang.com/' + zizi_img_url
        zizi_name = zizi.xpath('./a/@title')[0]
        print(all_img_path, he_url)
        get_img(he_url, zizi_name + str(i), all_img_path)
        i += 1
    except (IndexError, requests.RequestException):
        # Skip <center> blocks without a usable <a> link, and failed downloads
        continue
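A note on the concatenation above: if a scraped href ever starts with / or is already a full URL, plain string joining produces a double slash or a wrong address. The standard library's urllib.parse.urljoin handles both cases; a drop-in alternative:

from urllib.parse import urljoin

he_url = urljoin('http://www.gengzhongbang.com/', zizi_img_url)
# urljoin('http://www.gengzhongbang.com/', '/images/a.jpg') -> 'http://www.gengzhongbang.com/images/a.jpg'
# urljoin('http://www.gengzhongbang.com/', 'http://cdn.example.com/a.jpg') -> the absolute URL, unchanged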
With that, the desired links, names, and images have all been scraped.
The complete code is as follows:
# encoding: utf-8
import requests
from lxml import etree
import os

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.7 Safari/537.36'}
# The URL is the page address; see Figure 1 for the headers
url = 'http://www.gengzhongbang.com/haichongtupu/banchimuhaichong/'

def get_img(img_url, name, path):
    # Process a single image
    response = requests.get(img_url, headers=headers)
    content = response.content
    # Save the image in 'wb' (binary write) mode under the given name
    with open(path + '/{}.jpg'.format(name), 'wb') as f:
        f.write(content)

def get_url_tree(url):
    # Send a request to the site
    page_text = requests.get(url=url, headers=headers).text
    # Load the source fetched from the internet into an etree object
    tree = etree.HTML(page_text)
    return tree

root_path = r'E:\pythonProject\py\爬虫\害虫爬取'
for j in range(1, 22):
    all_url = "http://www.gengzhongbang.com/haichongtupu/linchimuhaichong/list-{}".format(j)
    # //*[@id="ct"]/div[1]/div[2]/div[2]/dl[1]
    tree = get_url_tree(all_url)
    fu_list = tree.xpath('//*[@id="ct"]/div[1]/div[2]/div[2]/dl')
    # print(fu_list)
    # See the explanation at Figure 2: there are several dl nodes, so fu_list is a list
    for li in fu_list:
        # print(li)
        # Here ./ continues from the li node above, i.e. it is shorthand for li/div...
        # //*[@id="ct"]/div[1]/div[2]/div[2]/dl[1]/dt/h2/a
        title = li.xpath('./dt/h2/a/text()')[0]
        new_url = li.xpath('./dt/h2/a/@href')[0]
        fu_name = li.xpath('//*[@id="ct"]/div[1]/div[2]/div[1]/h1/text()')[0]
        print(fu_name)
        fu_path = os.path.join(root_path, fu_name)
        all_img_path = fu_path + "/img"
        if not os.path.exists(fu_path):
            os.mkdir(fu_path)
        if not os.path.exists(all_img_path):
            os.mkdir(all_img_path)
        zi_tree = get_url_tree(new_url)
        zi_list = zi_tree.xpath('//*[@id="article_content"]/center')
        i = 0
        for zizi in zi_list:
            try:
                zizi_img_url = zizi.xpath('./a/@href')[0]
                he_url = 'http://www.gengzhongbang.com/' + zizi_img_url
                zizi_name = zizi.xpath('./a/@title')[0]
                print(all_img_path, he_url)
                get_img(he_url, zizi_name + str(i), all_img_path)
                i += 1
            except (IndexError, requests.RequestException):
                # Skip <center> blocks without a usable <a> link, and failed downloads
                continue
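One optional hardening step, not in the original script: the loop fires many requests back to back. A requests.Session reuses the connection, and a short pause is gentler on the server. A sketch of a drop-in get_url_tree variant (the 0.5 s delay is an arbitrary choice):

import time

session = requests.Session()
session.headers.update(headers)

def get_url_tree(url):
    page_text = session.get(url).text  # Session reuses the TCP connection across requests
    time.sleep(0.5)                    # arbitrary half-second pause between requests
    return etree.HTML(page_text)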