用python实现一个简单的爬虫,爬取百度贴吧图片
1.导入python库(urllib.request 和 re 都是Python标准库,无需额外安装)
import urllib.request
import re
2.挖掘网页内容
网址: https://tieba.baidu.com/p/4814458788
目的:对本网址中的图片进行抓取
对网页源代码挖掘发现,图片出现在形如 <img ... src="....jpg" size=...> 的标签中,图片的url位于其 src 属性里,因此可以用正则表达式提取。
3.根据url获取网页内容
def getHtml(url):
    """Fetch the page at *url* and return its raw bytes.

    Uses a context manager so the HTTP connection is closed even if
    read() raises — the original never closed the response object.
    """
    with urllib.request.urlopen(url) as page:
        return page.read()
4.从html中解析图片的url(使用正则表达式)
def getJPGs(html):
    """Extract .jpg image URLs from raw HTML bytes.

    html: page content as bytes, assumed UTF-8 encoded.
    Returns the list of URL strings captured from the src attribute of
    <img ... src="..."> tags (only the part inside the parentheses).
    """
    # Escape the dot so '.jpg' is matched literally (the original '.*?jpg'
    # would also accept e.g. 'foojpg'); the trailing ' size' keeps the
    # match anchored to Tieba's content-image tags.
    jpg = r'<img.*?src="(.*?\.jpg)" size'
    html = html.decode('utf-8')  # decode bytes to text before matching
    return re.findall(jpg, html)
5.总体代码
import urllib.request
import re
# Fetch the html content of the page at the given url.
def getHtmlContent(url):
    """Return the body of *url* as raw bytes.

    The response is used as a context manager so the underlying HTTP
    connection is always closed — the original leaked it.
    """
    with urllib.request.urlopen(url) as page:
        return page.read()
def getJPGs(html):
    """Parse the .jpg image URLs out of raw HTML bytes.

    html: page content as bytes, assumed UTF-8 encoded.
    Returns a list of URL strings (the capture group only).
    """
    # Regex for a Tieba image tag; the dot in '\.jpg' is escaped so it is
    # matched literally rather than as "any character" (bug in original),
    # and ' size' anchors the match to content images.
    jpgReg = r'<img.*?src="(.*?\.jpg)" size'
    html = html.decode('utf-8')
    return re.findall(jpgReg, html)
# Download one image by url and save it under the specified file name.
def downloadJPG(imgUrl, fileName):
    """Retrieve the image at *imgUrl* and store it locally as *fileName*."""
    urllib.request.urlretrieve(imgUrl, fileName)
# Download a batch of images; by default they are saved to the current directory.
def batchDownloadJPGs(imgUrls, path='./'):
    """Download every URL in *imgUrls*, naming the files 1.jpg, 2.jpg, ..."""
    # enumerate supplies the sequential number used for each file name
    for count, url in enumerate(imgUrls, start=1):
        downloadJPG(url, path + '{0}.jpg'.format(count))
# Wrapper: download all images from one Baidu Tieba page.
def download(url):
    """Fetch *url*, extract its .jpg image URLs, and download them all."""
    batchDownloadJPGs(getJPGs(getHtmlContent(url)))
def main():
    """Entry point: scrape the images from the example Tieba thread."""
    download('https://tieba.baidu.com/p/4814458788')


if __name__ == '__main__':
    main()