#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 2 16:45:17 2017
@author: vicky
"""
#import scrapy
#
#class DmozItem(scrapy.Item):
# title = scrapy.Field()
# link = scrapy.Field()
# desc = scrapy.Field()
#
#import scrapy
#
#class DmozSpider(scrapy.spiders.Spider):
# name = "dmoz"
# allowed_domains = ["dmoz.org"]
# start_urls = [
# "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
# "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
# ]
#
# def parse(self, response):
# filename = response.url.split("/")[-2]
# with open(filename, 'wb') as f:
# f.write(response.body)
#
#
#import requests
#url = "http://shuang0420.github.io/"
#r = requests.get(url)
#
##from urllib.request import urlopen
### request source file
##url = "http://shuang0420.github.io/"
##request = urllib.Request(url) # write a letter
##response = urllib.urlopen(request) # send the letter and get the reply
##page = response.read() # read the reply
### save source file
##webFile = open('webPage.html', 'wb')
##webFile.write(page)
##webFile.close()
#
#
#import urllib
#
#def getHtml(url):
# page = urllib.urlopen(url)
# html = page.read()
# return html
#
#html = getHtml("http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/600346.phtml")
#
#print html
# 实现一个简单的爬虫,爬取百度贴吧图片
# 1. urllib+re实现
import urllib.request
import re
# 根据url获取网页html内容
def getHtmlContent(url):
    """Fetch the page at *url* and return its raw body as bytes.

    Uses a context manager so the HTTP connection is closed
    deterministically instead of waiting for garbage collection.
    """
    with urllib.request.urlopen(url) as page:
        return page.read()
# 从html中解析出所有jpg图片的url
# 百度贴吧html中jpg图片的url格式为:<img ... src="XXX.jpg" width=...>
def getJPGs(html):
    """Extract every .jpg image URL from *html* (UTF-8 encoded bytes).

    Tieba image tags look like ``<img ... src="XXX.jpg" width=...>``;
    anchoring the pattern on the trailing ``width`` attribute keeps the
    match precise.
    """
    jpg_pattern = re.compile(r'<img.+?src="(.+?\.jpg)" width')
    return jpg_pattern.findall(html.decode('utf-8'))
# 用图片url下载图片并保存成指定文件名
def downloadJPG(imgUrl, fileName):
    """Download the image at *imgUrl* and save it to *fileName*.

    ``urllib.request.urlretrieve`` is documented as a legacy interface
    that may become deprecated; fetch with ``urlopen`` and write the
    bytes explicitly, with both resources managed by ``with``.
    """
    with urllib.request.urlopen(imgUrl) as response, open(fileName, 'wb') as f:
        f.write(response.read())
# 批量下载图片,默认保存到当前目录下
def batchDownloadJPGs(imgUrls, path='/Users/vicky/Documents/code/python/scrapy/'):
    """Download every URL in *imgUrls*, naming files 1.jpg, 2.jpg, ...

    NOTE(review): the default *path* is a machine-specific absolute
    directory, although the original comment claimed "current
    directory" — confirm which is intended before relying on the default.
    """
    # enumerate replaces the hand-rolled counter; numbering starts at 1.
    for count, url in enumerate(imgUrls, start=1):
        downloadJPG(url, '{0}{1}.jpg'.format(path, count))
# 封装:从百度贴吧网页下载图片
def download(url):
    """Fetch the Tieba page at *url* and download every JPG it references."""
    page_html = getHtmlContent(url)
    image_urls = getJPGs(page_html)
    batchDownloadJPGs(image_urls)
def main():
    """Entry point: scrape one fixed Tieba thread for its images."""
    thread_url = 'http://tieba.baidu.com/p/2256306796'
    download(thread_url)


if __name__ == '__main__':
    main()
#2. requests + re实现
import requests
import re
# 根据url获取网页html内容
def getHtmlContent(url, timeout=10):
    """Fetch the page at *url* and return its decoded text.

    Parameters
    ----------
    url : str
        Page to fetch.
    timeout : float
        Seconds before the request aborts. ``requests.get`` without a
        timeout can block indefinitely on an unresponsive server, so a
        finite default is supplied (backward-compatible keyword).
    """
    page = requests.get(url, timeout=timeout)
    return page.text
# 从html中解析出所有jpg图片的url
# 百度贴吧html中jpg图片的url格式为:<img ... src="XXX.jpg" width=...>
def getJPGs(html):
    """Extract every .jpg image URL from the HTML string *html*.

    Tieba image tags look like ``<img ... src="XXX.jpg" width=...>``;
    the trailing ``width`` anchor keeps the match precise.
    """
    jpg_pattern = re.compile(r'<img.+?src="(.+?\.jpg)" width')
    return jpg_pattern.findall(html)
# 用图片url下载图片并保存成指定文件名
####### 跟上面不一样 ######
def downloadJPG(imgUrl, fileName):
    """Stream the image at *imgUrl* into *fileName* in small chunks."""
    # closing() guarantees the streamed response is released even if
    # writing fails partway through.
    from contextlib import closing
    with closing(requests.get(imgUrl, stream=True)) as response:
        with open(fileName, 'wb') as out:
            for piece in response.iter_content(128):
                out.write(piece)
# 批量下载图片,默认保存到当前目录下
def batchDownloadJPGs(imgUrls, path='/Users/vicky/Documents/code/python/scrapy/'):
    """Download every URL in *imgUrls*, naming files 1.jpg, 2.jpg, ...

    Prints a progress line after each completed image.

    NOTE(review): the default *path* is a machine-specific absolute
    directory, although the original comment claimed "current
    directory" — confirm which is intended before relying on the default.
    """
    # enumerate replaces the hand-rolled counter; numbering starts at 1.
    for count, url in enumerate(imgUrls, start=1):
        downloadJPG(url, '{0}{1}.jpg'.format(path, count))
        print ('下载完成第{0}张图片'.format(count))
# 封装:从百度贴吧网页下载图片
def download(url):
    """Fetch the Tieba page at *url* and download every JPG it references."""
    batchDownloadJPGs(getJPGs(getHtmlContent(url)))
def main():
    """Entry point: scrape one fixed Tieba thread for its images."""
    thread_url = 'http://tieba.baidu.com/p/2256306796'
    download(thread_url)


if __name__ == '__main__':
    main()