# Crawler example: save a whole web page as a local document and download its images as local files.
import requests
import random
from lxml import etree
import urllib.request
class Download:
    """Crawler that saves a web page as a local .html file and downloads
    the page's first related image to a local file.
    """

    # Default target page (a china.com military article).
    DEFAULT_URL = "http://military.china.com/weapon/aircraft/zdj/130002600/20190724/10390.html"
    # Spoofed desktop-browser identity so the server serves the normal page.
    USER_AGENT = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)"

    def html_data(self, url=None):
        """Fetch the target URL and return the raw response body.

        Args:
            url: Page to fetch; defaults to DEFAULT_URL (backward compatible
                with the original zero-argument call).

        Returns:
            Raw HTML content as bytes.
        """
        headers = {'User-Agent': self.USER_AGENT}
        r = requests.get(url or self.DEFAULT_URL, headers=headers)
        return r.content

    def download_html(self, data, output_html_path='./zh.html'):
        """Write the raw HTML bytes to a local .html document.

        Args:
            data: Page content as bytes (from html_data).
            output_html_path: Destination file; defaults to the original
                hard-coded './zh.html'.
        """
        with open(output_html_path, 'wb') as f_html:
            f_html.write(data)

    def download_img(self, data):
        """Parse the first related-image URL from the HTML and save the image.

        Bug fix: the original referenced an undefined local `headers`
        (NameError at runtime); the headers are now built from USER_AGENT.

        Args:
            data: Page content as bytes (from html_data).
        """
        html = etree.HTML(data)
        img_href = html.xpath('//*/ul[@class="article-related-img clearfix"]/li/a/img/@src')
        if not img_href:
            # Robustness: page layout changed or no related images — nothing to save.
            return
        img_url = img_href[0]
        print(img_url)
        img = requests.get(img_url, headers={'User-Agent': self.USER_AGENT}).content
        # Name the local file after the last path segment of the image URL.
        output_img_path = './zh-' + img_url.split('/')[-1]
        with open(output_img_path, 'wb') as f_img:
            f_img.write(img)
if __name__ == '__main__':
    # Fetch the page once, then persist both the HTML and its first image locally.
    downloader = Download()
    page_bytes = downloader.html_data()
    downloader.download_html(page_bytes)
    downloader.download_img(page_bytes)
# Note — two ways in Python 3 to send a request to a URL and read the HTML page:
#
#   # 1. Standard library:
#   import urllib.request
#   r = urllib.request.urlopen(url)
#   r = r.read()
#
#   # 2. Third-party requests library:
#   import requests
#   r = requests.get(url, headers=headers)
#   r = r.content