7 Crawler Example 3: Downloading Images from Tieba
Goal:
Visit a Tieba forum and find the link of every post in it
Follow each link into the post and find the address of every image in it
Download the images
XPath rules are needed for this (a quick refresher follows)
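An XPath rule selects nodes from an HTML document tree by path and attribute. A minimal sketch with lxml; the sample HTML fragment here is purely illustrative, not part of the crawler:

from lxml import etree

# Build a tree from an HTML fragment and select the href of the matching <a>
html = etree.HTML('<div><a class="t" href="/p/123">post</a></div>')
print(html.xpath('//a[@class="t"]/@href'))  # ['/p/123']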
7.1 Fetching the page content
from urllib import request, parse
import ssl
import random

# List of common User-Agent strings
ua_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
]

# Load a page
def loadPage(url):
    # Pick a random User-Agent from ua_list
    userAgent = random.choice(ua_list)
    headers = {
        'User-Agent': userAgent
    }
    # Build the request with the headers attached
    req = request.Request(url, headers=headers)
    #print(req) # <urllib.request.Request object at 0x007B1370>
    # Create an unverified SSL context
    context = ssl._create_unverified_context()
    # Open the response object
    response = request.urlopen(req, context=context)
    #print(response) # <http.client.HTTPResponse object at 0x01F36BF0>
    # Read the response body as bytes
    html = response.read()
    # Decode the UTF-8 bytes into a str
    content = html.decode('utf-8')
    return content

if __name__ == '__main__':
    url = 'https://tieba.baidu.com/f?kw=%E5%8A%A8%E6%BC%AB&ie=utf-8&pn=50'
    content = loadPage(url)
    print(content)
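In the URL above, kw is the URL-encoded forum name (%E5%8A%A8%E6%BC%AB decodes to 动漫) and pn is the offset into the post list used for paging. For reference, the same query string can be built with the already-imported parse module; this is a sketch rather than part of the original code:

# Build the forum URL from readable parameters; urlencode handles the percent-encoding
qs = parse.urlencode({'kw': '动漫', 'ie': 'utf-8', 'pn': 50})
url = 'https://tieba.baidu.com/f?' + qs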
7.2 Finding the link of each post in the forum
To use XPath rules in Python, you need to install the lxml library:
pip install lxml -i http://pypi.douban.com/simple --trusted-host pypi.douban.com
Reference code:
from lxml import etree

# Load a page
def loadPage(url):
    # Pick a random User-Agent from ua_list
    userAgent = random.choice(ua_list)
    headers = {
        'User-Agent': userAgent
    }
    # Build the request with the headers attached
    req = request.Request(url, headers=headers)
    #print(req) # <urllib.request.Request object at 0x007B1370>
    # Create an unverified SSL context
    context = ssl._create_unverified_context()
    # Open the response object
    response = request.urlopen(req, context=context)
    #print(response) # <http.client.HTTPResponse object at 0x01F36BF0>
    # Read the response body as bytes
    html = response.read()
    # Decode the UTF-8 bytes into a str
    content = html.decode('utf-8')
    # Build a document tree from the HTML with etree
    content = etree.HTML(content)
    # Select the href of every post title link
    link_list = content.xpath('//a[@class="j_th_tit "]/@href')
    for link in link_list:
        fulllink = 'https://tieba.baidu.com' + link
        print(fulllink)
At this point, the link of every post can be printed.
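Note the trailing space in "j_th_tit ": @class compares against the whole attribute string, and that is the literal class value Tieba serves. If the class list ever changes, a contains() predicate is a more tolerant variant (a sketch, not from the original code):

# Match any <a> whose class attribute contains "j_th_tit"
link_list = content.xpath('//a[contains(@class, "j_th_tit")]/@href')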
7.3 Finding the image links inside a post
# Load the link of each post in the forum
def loadPage(url):
    ...
    # Build a document tree from the HTML with etree
    content = etree.HTML(content)
    # Select the href of every post title link
    link_list = content.xpath('//a[@class="j_th_tit "]/@href')
    for link in link_list:
        fulllink = 'https://tieba.baidu.com' + link
        loadImage(fulllink)

# Load the image links inside a post
def loadImage(url):
    # Pick a random User-Agent from ua_list
    userAgent = random.choice(ua_list)
    headers = {
        'User-Agent': userAgent
    }
    # Build the request with the headers attached
    req = request.Request(url, headers=headers)
    # Create an unverified SSL context
    context = ssl._create_unverified_context()
    # Open the response object
    response = request.urlopen(req, context=context)
    # Read the response body as bytes
    html = response.read()
    # Decode the UTF-8 bytes into a str
    content = html.decode('utf-8')
    # Build a document tree from the HTML with etree
    content = etree.HTML(content)
    # BDE_Image is the class Tieba gives to images posted in a thread,
    # which filters out avatars and emoticons
    link_list = content.xpath('//img[@class="BDE_Image"]/@src')
    for link in link_list:
        print(link)
7.4 Saving the images to files
# Load the image links inside a post
def loadImage(url):
    ...
    # Build a document tree from the HTML with etree
    content = etree.HTML(content)
    link_list = content.xpath('//img[@class="BDE_Image"]/@src')
    for link in link_list:
        print(link)
        writeImage(link)

# Download an image and save it locally
def writeImage(url):
    # Pick a random User-Agent from ua_list
    userAgent = random.choice(ua_list)
    headers = {
        'User-Agent': userAgent
    }
    # Build the request with the headers attached
    req = request.Request(url, headers=headers)
    # Create an unverified SSL context
    context = ssl._create_unverified_context()
    # Open the response object
    response = request.urlopen(req, context=context)
    # Read the raw image bytes
    image = response.read()
    # Use the last 10 characters of the URL as the file name
    filename = url[-10:] # f57882.jpg
    # Write the bytes to disk in binary mode
    with open(filename, 'wb') as f:
        f.write(image)
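Taking the last 10 characters of the URL is quick, but two images can end with the same 10 characters and overwrite each other. A sketch of a more robust alternative, assuming the image name is the last segment of the URL path:

import os
from urllib.parse import urlparse

# Use the full final path segment of the image URL as the file name
filename = os.path.basename(urlparse(url).path)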
7.5 User-supplied parameters
Code omitted. Implement it yourself; you can refer to the example in the earlier article from the same column:
https://blog.csdn.net/Smart_J_King/article/details/108758163
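For reference, a minimal sketch of one possible implementation; the kw/ie/pn parameters come from the forum URL used in 7.1, while the prompts and the 50-posts-per-page step are assumptions, not the original author's code:

if __name__ == '__main__':
    # Ask the user which forum to crawl and how many pages to fetch
    kw = input('Enter the Tieba forum name: ')
    pages = int(input('Enter the number of pages to crawl: '))
    for page in range(pages):
        # Assumption: each page holds 50 posts, so pn advances by 50
        qs = parse.urlencode({'kw': kw, 'ie': 'utf-8', 'pn': page * 50})
        loadPage('https://tieba.baidu.com/f?' + qs)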