#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 21:18:48 2017
@author: vicky
"""
# Rewrite of crawler1
# A simple crawler that scrapes the images from a Baidu Tieba post
# 1. urllib + re implementation
import urllib.request
import re
# Target url
url = 'http://tieba.baidu.com/p/2256306796'
# Fetch the page html for the given url
page = urllib.request.urlopen(url)
html = page.read()
# Parse the urls of all jpg images out of the html
# In Baidu Tieba html, a jpg image url appears as: <img ... src="XXX.jpg" width=...>
# Regex for jpg urls: start from <img, capture the src value, stop at width
jpgReg = re.compile(r'<img.+?src="(.+?\.jpg)" width')  # note: the trailing 'width' tightens the match
# Extract the list of jpg urls
jpgs = re.findall(jpgReg, html.decode('utf-8'))
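# Hedged sanity check of jpgReg on a hand-written snippet; the BDE_Image
# class and the sample url below are assumptions modelled on typical Tieba
# markup, not taken from the live page.
sample = '<img class="BDE_Image" src="http://imgsrc.baidu.com/forum/pic/item/sample.jpg" width="510">'
print(re.findall(jpgReg, sample))  # expected: ['http://imgsrc.baidu.com/forum/pic/item/sample.jpg']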
# Download the images from the Baidu Tieba page
# Batch-download the images, saving them under the directory path
path = '/Users/vicky/Documents/code/python/scrapy/'
# Counter used to name the image files
count = 1
for jpg_url in jpgs:
    # Download the image at jpg_url and save it under the specified file name
    urllib.request.urlretrieve(jpg_url, ''.join([path, '{0}.jpg'.format(count)]))
    count = count + 1
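# Hedged sketch: the same download wrapped in a reusable helper with basic
# error handling; download_jpgs, out_dir, and the enumerate-based naming are
# illustrative assumptions, not part of the original script.
import os

def download_jpgs(jpg_urls, out_dir):
    os.makedirs(out_dir, exist_ok=True)  # create the target directory if missing
    for i, jpg_url in enumerate(jpg_urls, start=1):
        try:
            urllib.request.urlretrieve(jpg_url, os.path.join(out_dir, '{0}.jpg'.format(i)))
        except Exception as exc:  # e.g. HTTPError, URLError
            print('Failed to download {0}: {1}'.format(jpg_url, exc))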
# 2. requests + re implementation
import requests
import re
from contextlib import closing
url = 'http://tieba.baidu.com/p/2256306796'
page = requests.get(url)
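# Hedged aside: requests infers the encoding from the response headers; if
# the page text comes back garbled, forcing the detected encoding is one
# common fix (apparent_encoding is part of the requests API).
page.encoding = page.apparent_encoding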
html = page.text
jpgReg = re.compile(r'<img.+?src="(.+?\.jpg)" width')  # note: the trailing 'width' tightens the match
# Extract the list of jpg urls
jpgs = re.findall(jpgReg, html)
# Counter used to name the image files
count = 1
for jpg_url in jpgs:
    # Stream the image at jpg_url and write it to disk in chunks
    fileName = ''.join([path, '{0}.jpg'.format(count)])
    with closing(requests.get(jpg_url, stream=True)) as resp:
        with open(fileName, 'wb') as f:
            for chunk in resp.iter_content(128):
                f.write(chunk)
    # downloadJPG(jpg_url, ''.join([path, '{0}.jpg'.format(count)]))
    print('Downloaded image {0}'.format(count))  # added this line
    count = count + 1
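# Hedged sketch: one way to factor the streaming download into the
# downloadJPG helper that the commented-out call above refers to; its
# signature is inferred from that call, not confirmed by the source.
def downloadJPG(img_url, file_name):
    with closing(requests.get(img_url, stream=True)) as resp:
        with open(file_name, 'wb') as f:
            for chunk in resp.iter_content(128):
                f.write(chunk)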