#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# cramler.py-20171128
"""
Created on Sat Sep  2 16:45:17 2017

@author: vicky
"""
#import scrapy
#
#class DmozItem(scrapy.Item):
#    title = scrapy.Field()
#    link = scrapy.Field()
#    desc = scrapy.Field()
#    
#import scrapy
#
#class DmozSpider(scrapy.spiders.Spider):
#    name = "dmoz"
#    allowed_domains = ["dmoz.org"]
#    start_urls = [
#        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
#        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
#    ]
#
#    def parse(self, response):
#        filename = response.url.split("/")[-2]
#        with open(filename, 'wb') as f:
#            f.write(response.body)
#
#
#import requests
#url = "http://shuang0420.github.io/"
#r = requests.get(url)
#
##from urllib.request import urlopen
### request source file
##url = "http://shuang0420.github.io/"
##request = urllib.Request(url)  # write a letter
##response = urllib.urlopen(request)  # send the letter and get the reply
##page = response.read()  # read the reply
### save source file
##webFile = open('webPage.html', 'wb')
##webFile.write(page)
##webFile.close()
#
#
#import urllib
#
#def getHtml(url):
#    page = urllib.urlopen(url)
#    html = page.read()
#    return html
#
#html = getHtml("http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/600346.phtml")
#
#print html



# A simple crawler that downloads the images from a Baidu Tieba post

# 1. urllib + re implementation
import urllib.request
import re

# Fetch the raw HTML content of the page at url
def getHtmlContent(url):
    page = urllib.request.urlopen(url)
    return page.read()

# Parse the urls of all jpg images out of the html
# In Baidu Tieba html, jpg image urls appear as: <img ... src="XXX.jpg" width=...>
def getJPGs(html):
    # Regex for the jpg url: start at <img, capture the src value, stop at " width"
    jpgReg = re.compile(r'<img.+?src="(.+?\.jpg)" width')  # note: the trailing 'width' makes the match more precise
    # Extract the list of jpg urls
    jpgs = re.findall(jpgReg, html.decode('utf-8'))

    return jpgs
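
# A quick sanity check for the regex above. The <img> tag below is a made-up,
# Tieba-style example (the URL is hypothetical); the check only runs when the
# script is executed directly.
if __name__ == '__main__':
    _sample = b'<img class="BDE_Image" src="http://imgsrc.baidu.com/forum/pic/item/abc.jpg" width="560">'
    assert getJPGs(_sample) == ['http://imgsrc.baidu.com/forum/pic/item/abc.jpg']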

# Download the image at imgUrl and save it under the given fileName
def downloadJPG(imgUrl, fileName):
    urllib.request.urlretrieve(imgUrl, fileName)
    
# Download the images in batch; note that the default save path below is hard-coded, not the current directory
def batchDownloadJPGs(imgUrls, path='/Users/vicky/Documents/code/python/scrapy/'):
    # counter used to name the downloaded images
    count = 1
    for url in imgUrls:
        downloadJPG(url, ''.join([path, '{0}.jpg'.format(count)]))
        count = count + 1

# Wrapper: download all the images from a Baidu Tieba page
def download(url):
    html = getHtmlContent(url)
    jpgs = getJPGs(html)
    batchDownloadJPGs(jpgs)
    
def main():
    url = 'http://tieba.baidu.com/p/2256306796'
    download(url)
    
if __name__ == '__main__':
    main()
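
# Note: the requests-based implementation below reuses the same function names.
# When the whole file is run, the definitions above execute and their __main__
# guard fires first; the functions are then redefined and the second __main__
# guard runs the requests version against the same URL and save path.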
    

    

# 2. requests + re implementation
import requests
import re

# Fetch the HTML content of the page at url
def getHtmlContent(url):
    page = requests.get(url)
    return page.text
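
# Unlike the urllib version above, requests decodes the body for us: .text uses
# the encoding requests infers from the response headers, so getJPGs below
# needs no explicit .decode('utf-8').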

# Parse the urls of all jpg images out of the html
# In Baidu Tieba html, jpg image urls appear as: <img ... src="XXX.jpg" width=...>
def getJPGs(html):
    jpgReg = re.compile(r'<img.+?src="(.+?\.jpg)" width')  # note: the trailing 'width' makes the match more precise
    # Extract the list of jpg urls
    jpgs = re.findall(jpgReg, html)
    return jpgs

# Download the image at imgUrl and save it under the given fileName
####### different from the urllib version above ######
def downloadJPG(imgUrl, fileName):
    # closing() makes sure the request and response are released when the block exits
    from contextlib import closing
    with closing(requests.get(imgUrl, stream=True)) as resp:
        with open(fileName, 'wb') as f:
            for chunk in resp.iter_content(128):
                f.write(chunk)
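
# With stream=True the body is not downloaded all at once; iter_content(128)
# yields it in 128-byte chunks that are written straight to disk, so a large
# image never has to sit fully in memory. A bigger chunk size (e.g. 8192)
# would simply mean fewer loop iterations.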
    
# Download the images in batch; note that the default save path below is hard-coded, not the current directory
def batchDownloadJPGs(imgUrls, path='/Users/vicky/Documents/code/python/scrapy/'):
    # counter used to name the downloaded images
    count = 1
    for url in imgUrls:
        downloadJPG(url, ''.join([path, '{0}.jpg'.format(count)]))
        print('Downloaded image {0}'.format(count))  # added compared with the urllib version
        count = count + 1

# Wrapper: download all the images from a Baidu Tieba page
def download(url):
    html = getHtmlContent(url)
    jpgs = getJPGs(html)
    batchDownloadJPGs(jpgs)
    
def main():
    url = 'http://tieba.baidu.com/p/2256306796'
    download(url)
    
if __name__ == '__main__':
    main()


 
