A crawler for the 妹子图 image gallery site (mmjpg.com)

import requests
from lxml import etree
import os


source_url = 'http://www.mmjpg.com'

# Use a Session so every request carries the same browser-like headers.
s = requests.Session()
s.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
s.headers['Cache-Control'] = 'max-age=0'
s.headers['Connection'] = 'keep-alive'
s.headers['Accept-Encoding'] = 'gzip, deflate, sdch'
s.headers['Upgrade-Insecure-Requests'] = '1'
# Deliberately left out: setting Host by hand breaks the session (see the summary below).
# s.headers['Host'] = 'www.mmjpg.com'
s.headers['Referer'] = 'http://www.mmjpg.com/'
s.headers['Accept-Language'] = 'zh-CN,zh;q=0.8'
s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'

def downpage(droute, ele_con_url, No=1):
    """Recursively download every image of one gallery into droute."""
    print(No)
    con = s.get(ele_con_url)      # use the session so the headers above apply
    page_con = etree.HTML(con.content.decode('utf-8'))
    down_url = page_con.xpath('//div[@class="content"]/a/img/@src')[0]
    print(down_url)

    with open(droute + str(No) + '.jpg', 'wb') as f:
        con = s.get(down_url)
        f.write(con.content)
    try:
        # xpath()[0] raises IndexError when there is no "下一张" (next image) button
        bldown = page_con.xpath('//div[@class="page"]/a[text()="下一张"]/@href')[0]
    except IndexError:
        print("pictures of this lady have been downloaded")
    else:
        downpage(droute, source_url + bldown, No + 1)


con_url = source_url
page = 0
while True:
    page = page + 1
    print(con_url)
    con = s.get(con_url)
    page_content = etree.HTML(con.content.decode('utf-8'))
    item_data = page_content.xpath('//div[@class="pic"]/ul/li')
    print(len(item_data))
    print("page :%d" % page)

    for eve_item in item_data:
        ele_con_url = eve_item.xpath('a/@href')[0]
        ele_name = eve_item.xpath('a/img/@alt')[0]
        print(ele_con_url, ele_name)
        if os.path.exists(ele_name):
            continue                      # this gallery was already downloaded
        os.mkdir(ele_name)                # one directory per gallery, named by its title
        downpage(ele_name + '/', ele_con_url)

    try:
        # raises IndexError on the last listing page (no "下一页" button)
        blnextpage = page_content.xpath('//div[@class="page"]/a[text()="下一页"]/@href')[0]
    except IndexError:
        print("pictures of all ladies have been downloaded")
        break
    else:
        con_url = source_url + blnextpage


Summary:

1. xpath() on an lxml etree object always returns a list, so you normally take the first element with [0]. I forgot this and spent quite a while debugging.
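
A minimal sketch of this, using a hypothetical inline HTML fragment in place of a downloaded page:

from lxml import etree

doc = etree.HTML('<div class="content"><a><img src="/a.jpg"/></a></div>')
result = doc.xpath('//div[@class="content"]/a/img/@src')
print(type(result), result)   # <class 'list'> ['/a.jpg']
print(result[0])              # /a.jpg  -- indexing with [0] is required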

2. While writing this I hit a problem where images downloaded fine but could not be opened. The cause was not the download code itself: the data the server sent back was simply wrong, in one of two ways:

1) an error status code instead of 200;

2) a different image substituted for the requested one.

Solution: fill in the headers as completely as possible, and when using a Session, omit the Host header.
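
One way to catch such bad responses before writing a broken .jpg to disk is to check the status code and the Content-Type header first. A sketch under that assumption (fetch_image is a helper name introduced here; it relies on the session s set up above):

def fetch_image(url):
    con = s.get(url)
    # anything other than 200 means the server refused or redirected us
    if con.status_code != 200:
        raise IOError('bad status: %d' % con.status_code)
    # a substituted error page usually comes back as text/html, not image/*
    if not con.headers.get('Content-Type', '').startswith('image/'):
        raise IOError('server sent non-image data')
    return con.content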

3. The try / except / else construct is very handy here. To decide whether to jump to the next page, I extract the "下一页" (next page) link directly; when a page has no next-page button, the extraction itself raises, so except and else cleanly tell "reached the end" apart from "keep going", as the sketch below shows.
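
A minimal refactoring sketch of that pattern (next_page_url is a name introduced here for illustration):

from lxml import etree

def next_page_url(page_content, source_url):
    # Return the absolute URL of the next page, or None on the last page.
    try:
        # xpath()[0] raises IndexError when the "下一页" button is absent
        href = page_content.xpath('//div[@class="page"]/a[text()="下一页"]/@href')[0]
    except IndexError:
        return None
    else:
        return source_url + href

# a hypothetical last page with no next-page link:
last = etree.HTML('<div class="page"></div>')
print(next_page_url(last, 'http://www.mmjpg.com'))   # None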


4. The download-page function can be rewritten without recursion. The recursive version makes one nested call per image, and CPython's default recursion limit is 1000, so a long enough gallery would eventually raise RecursionError.
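
To see the limit on your own interpreter (standard library only):

import sys
print(sys.getrecursionlimit())   # 1000 by default in CPython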

For example:

# The imports, session/header setup and the main listing loop are identical to
# the script above; only downpage() changes.

def downpage(droute, ele_con_url, No=1):
    """Download every image of one gallery, iteratively this time."""
    while True:
        print(No)
        con = s.get(ele_con_url)      # use the session so the headers apply
        page_con = etree.HTML(con.content.decode('utf-8'))
        down_url = page_con.xpath('//div[@class="content"]/a/img/@src')[0]
        print(down_url)

        with open(droute + str(No) + '.jpg', 'wb') as f:
            con = s.get(down_url)
            f.write(con.content)
        try:
            # IndexError here means no "下一张" (next image) button: gallery finished
            bldown = page_con.xpath('//div[@class="page"]/a[text()="下一张"]/@href')[0]
        except IndexError:
            print("pictures of this lady have been downloaded")
            break
        else:
            ele_con_url = source_url + bldown
            No = No + 1


Comments and suggestions are welcome.



