1. 依赖
requests、BeautifulSoup4(bs4)、lxml —— 可通过 pip install requests beautifulsoup4 lxml 安装
2. 代码
爬取网址:http://www.chebiao.com.cn/chebiao/(每个字母的分类页为 /chebiao/<字母小写>/)
"""Scrape car-logo images from chebiao.com.cn and save them as JPEGs locally."""
import requests
from bs4 import BeautifulSoup
from pathlib import Path

# Base site URL; per-letter category pages live at /chebiao/<letter>/ .
BASE_URL = "http://www.chebiao.com.cn"
# Brand-name first letters the site has category pages for
# (renamed from `list`, which shadowed the builtin).
LETTERS = ['A', 'B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N',
           'O', 'P', 'Q', 'R', 'S', 'T', 'W', 'X', 'Y', 'Z']

# Output directory (same hard-coded location as before, via pathlib);
# create it up front so the file writes below cannot fail on a missing folder.
SAVE_DIR = Path('C:/Users/Lance/Desktop/1')
SAVE_DIR.mkdir(parents=True, exist_ok=True)

# Category-page URLs, one per starting letter.
category_urls = [BASE_URL + "/chebiao/" + letter.lower() + "/" for letter in LETTERS]

# Collected (image URL, brand name) data; the two lists stay aligned
# because each <li> appends to both or to neither.
img_urls = []
name_list = []
for page_url in category_urls:
    # requests' timeout is in SECONDS — the original's 3000 would allow a
    # 50-minute hang per page; 30 s is a generous per-request limit.
    response = requests.get(page_url, timeout=30)
    response.encoding = 'gbk'  # the site serves GBK-encoded pages
    soup = BeautifulSoup(response.text, 'lxml')
    for li in soup.select(".chebiao ul li"):
        img = li.select_one("img")
        span = li.select_one("span")
        # Skip malformed entries instead of crashing on a None lookup.
        if img is None or span is None:
            continue
        img_urls.append(BASE_URL + str(img['src']))
        name_list.append(span.text)

# Download every image; zip the parallel lists instead of keeping a manual
# index counter. The [:-2] slice drops the trailing two characters of the
# caption (the "车标" suffix) to get the bare brand name.
for url, name in zip(img_urls, name_list):
    resp = requests.get(url, timeout=30)
    path = SAVE_DIR / (name[:-2] + ".jpg")
    # `with` closes the file on exit — the original's flush()/close() inside
    # the with-block were redundant.
    with open(path, 'wb') as f:
        f.write(resp.content)
3.效果图