1、在任意网站的页面局部右键点击"查看元素",即可找到该部分在源代码中的对应位置。此操作不便截图演示,但非常简单,读者可以自行尝试。
2、分析完文本结构以后,开始写爬虫代码
```python
import os
import re
import requests
from bs4 import BeautifulSoup
# 提取每张图片的地址url
def get_ads():
    """Collect the URL of every single-image page under the target tag.

    Walks the tag listing page(s), follows each gallery link, reads the
    gallery's last page number from its pagination bar, and builds one
    URL per image page (``<base>_<n>.html``).

    Returns:
        list[str]: URLs of individual image pages.
    """
    ads = []
    header = {
        # Fixed: original said 'Mozzila' and had no space before 'Chrome/…',
        # producing a malformed User-Agent.
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
        # Fixed: the real HTTP request header is 'Referer', not 'referers'.
        'Referer': 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=15007414_dg&wd=%E5%86%99%E7%9C%9F%E7%BD%91&oq=%25E6%2580%25A7%25E6%2584%259F%25E7%25BD%2591%25E5%259B%25BE&rsv_pq=ac0a3cca00068ccc&rsv_t=d9acJl7sssPsxcSiggO3UZLvxDC%2FBvWIh%2FdL6BvEyOB4qUddIr41heG1lLBpAmLa3D4&rqlang=cn&rsv_enter=1&rsv_dl=tb&inputT=2639&rsv_sug3=26&rsv_sug1=20&rsv_sug7=100&rsv_sug2=0&rsv_sug4=2639',
        'Connection': 'keep-alive'
    }
    # NOTE(review): the original built a `proxy` *set* (not a dict of
    # scheme->url as requests expects) and never passed it to requests.get;
    # the unused, malformed proxy setup is removed here.
    for t in range(1, 2):
        url = 'https://www.yeitu.com/tag/youguowang/?page=%d' % t
        r = requests.get(url, headers=header, timeout=20)
        soup = BeautifulSoup(r.text, 'lxml')
        pic_list = soup.find('div', id='tag_box')
        if pic_list is None:
            # Layout changed or the request was blocked; skip this page.
            continue
        for item in pic_list.find_all('div', class_='tag_list'):
            href = item.a['href']
            gallery = requests.get(href, headers=header, timeout=20)
            gallery_soup = BeautifulSoup(gallery.text, 'lxml')
            # Second-to-last pagination link holds the last page number.
            max_span = gallery_soup.find('div', id='pages').find_all('a')[-2].get_text()
            # Fixed: the original took the first 50 characters via
            # re.findall(r'.{50}', href), which breaks (IndexError) for any
            # href whose length differs. Strip the '.html' suffix explicitly.
            base = href[:-5] if href.endswith('.html') else href
            for page in range(1, int(max_span) + 1):
                ads.append('%s_%d.html' % (base, page))
    return ads  # one URL per single-image page
def save_pic(ad):
    """Download the image from each page URL in *ad* into the current directory.

    Args:
        ad: iterable of single-image page URLs, as produced by ``get_ads()``.

    Pages whose markup does not match the expected structure (or that fail
    to download) are reported and skipped, best-effort.
    """
    header = {
        # Fixed: 'Mozzila' typo and missing space before 'Chrome/…'.
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
        # 'Referer' tells the target site which page you came from
        # (fixed from the nonexistent 'referers' header).
        'Referer': 'https://www.yeitu.com/tag/youguowang/?page=%d',
        'Connection': 'keep-alive'
    }
    for url in ad:
        print(url)
        try:
            r = requests.get(url, headers=header, timeout=20)
            soup = BeautifulSoup(r.text, 'lxml')
            pic_url0 = soup.body.find('div', 'w row').find('div', 'picture').find('div', 'img_content')
            pic_url = pic_url0.find('div', 'img_box').find('a').find('img')['src']
            print(pic_url)
            # Last five characters before the extension; collisions overwrite.
            name = pic_url[-9:-4]
            img = requests.get(pic_url, headers=header, timeout=20)
            # Binary mode is required for media. Fixed: 'wb' instead of 'ab' —
            # append mode corrupted images on re-runs or name collisions.
            # 'with' guarantees the handle is closed even if the write fails.
            with open(name + '.jpg', 'wb') as f:
                f.write(img.content)  # media must be written via .content (bytes)
        except (AttributeError, TypeError, KeyError, requests.RequestException):
            # Fixed: bare `except:` swallowed everything (even KeyboardInterrupt).
            # A None in the find() chain or a failed request means the page
            # lacks the expected image; report and keep going.
            print('error 404')
            continue
if __name__ == '__main__':
    # Gather every image-page URL first, then download them one by one.
    page_urls = get_ads()
    save_pic(page_urls)
3、有的地方没能爬到图片,原因暂不深究,读者可以自行处理;本次只捕获并跳过它,让程序继续运行。读者可以自行搜索如何将爬取的图片存入指定路径;本次未设定存储路径,爬取的图片直接保存在项目文件夹里。