A Python crawler example

It's just a simple Python crawler.

I just wanted to crawl a few images for fun.
Source site: http://www.setuw.com
Written in Python. It uses the concurrent.futures thread pool, BeautifulSoup (bs4), and a few other libraries; install any missing ones yourself (e.g. pip install beautifulsoup4).
Environment: Python 3; tested on Windows 10 and on a Raspberry Pi.

Website element structure

[The two screenshots showing the relevant page markup are omitted.]
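As a rough stand-in for those screenshots: the parsing code below assumes that each album on an index page is an <a> tag with class "a1", and that each picture <img> inside an album page carries a "datas" attribute whose single-quoted segment holds the image URL. The toy snippet here only illustrates that shape; the attribute values are made up, and the real markup on setuw.com may differ.

# Illustrative only: a tiny page shaped like what the crawler below expects.
from bs4 import BeautifulSoup

sample = """
<a class="a1" href="/a/12345/" title="sample album">cover</a>
<img datas="... 'http://img.example.com/photos/001.jpg' ...">
"""
soup = BeautifulSoup(sample, "html.parser")
print(soup.find_all("a", attrs={"class": "a1"})[0]["href"])  # -> /a/12345/
print(soup.find_all("img")[0]["datas"].split("'")[-2])       # -> the image URL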

Code
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
import urllib.request
import os
from bs4 import BeautifulSoup


maxThreadCount = 8

baseDomain = "http://www.setuw.com"
intrance = "http://www.setuw.com/tag/rosi/"  # default entry page

# URL paths for the site's categories
tags = ["/tag/rosi/", "/tag/tuigirl/", "/tag/ugirls/",
        "/tag/xiuren/", "/tag/disi/", "/tag/dongman/", "/tag/xinggan/",
        "/tag/qingchun/", "/tag/youhuo/", "/tag/mote/", "/tag/chemo/",
        "/tag/tiyu/", "/tag/zuqiubaobei/", "/meinv/liuyan/"
        ]

# Display names for the categories above (order matches tags)
types = ["ROSI", "推女郎", "尤果", "秀人",
         "DISI", "动漫", "性感", "清纯", "诱惑", "模特", "车模", "体育", "足球", "柳岩"]

typeSize = len(types)
path = ""
header = {
    "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive'
}



def Download(argv1):
    # argv1 packs "<image url>#<album title>#<image name>#<progress>" into one string
    url, title, name, progress = argv1.split("#")
    print("Download processing:", progress)
    apath = path + "/" + title + "/"
    if not os.path.exists(apath):  # create the album directory if it does not exist
        os.makedirs(apath, exist_ok=True)  # exist_ok avoids a race between worker threads
    urllib.request.urlretrieve(url, '{0}{1}.jpg'.format(apath, name))  # fetch the image and save it locally
    return

    

def run(targetUrl, title):
    print("downloading " + title)
    req = urllib.request.Request(url=targetUrl, headers=header)
    response = urllib.request.urlopen(req)  # the Request object carries the custom headers
    html = response.read().decode('utf-8', 'ignore')
    soup = BeautifulSoup(html, 'html.parser')

    imgs = soup.find_all('img')
    size = len(imgs)
    with ThreadPoolExecutor(maxThreadCount) as pool:
        for i in range(2, size - 16):
            # determined by inspecting the page: only the <img> tags in this
            # index range belong to the album itself
            data = imgs[i]["datas"]
            parts = data.split("'")
            # packed arguments: download link, album title, image name, album progress
            argv = (parts[-2] + "#" + title + "#" + parts[-2].split(".")[1] + str(i)
                    + "#" + str(i - 1) + "/" + str(size - 18))
            pool.submit(Download, argv)  # hand one image per task to the thread pool
    print(title, " download successful;")
    return



if __name__ == '__main__':
    '''Choose the download path: enter "." to download into the current
    directory, or press Enter to use /home/hdd/picDl/ (my own disk mount
    point; change it to whatever suits you).'''
    input1 = input("input a folder(. as ./ , none as /home/hdd/picDl/):")
    if input1 == ".":
        path = "./"
    elif input1 == "":
        path = "/home/hdd/picDl/"
    else:
        path = input1
    print("Path set to " + path)
    # Choose a category to download. These come from the site's top
    # navigation; I collected them by hand, so they may go stale.
    for i in range(0, len(types)):
        print("| " + str(i) + " | " + types[i] + " | ")
    print("select a type to download , ")
    index = input(" or input nothing to download index page:")
    if index != "":
        index1 = int(index)
        if 0 <= index1 < len(tags):
            intrance = baseDomain + tags[index1]
        else:
            print("Invalid selection, using the default download target.")
    print(intrance + " is going to be downloaded.")
    '''
    Choose the number of download threads. Note that each worker thread
    downloads a single image, so this only parallelises the images of an album.
    '''
    maxThreadCount_ = input("input a number if you want to modify default thread number:")
    if maxThreadCount_ == "":
        print("using default number:", maxThreadCount)
    else:
        print("Modified number to:", maxThreadCount_)
        maxThreadCount = int(maxThreadCount_)
    req = urllib.request.Request(url=intrance, headers=header)
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8', 'ignore')
    # decode the response to get the page's HTML source
    soup = BeautifulSoup(html, 'html.parser')
    Divs = soup.find_all('a', attrs={'class': 'a1'})  # one <a class="a1"> per album on the index page
    for div in Divs:
        href = div.get("href")
        if href is None:
            print("No more albums.")
        elif href == "":  # the link attribute exists but is empty
            print("This album has no link.")
        else:
            targetUrl = baseDomain + href
            title = div["title"]
            print("Downloading album: " + title)
            run(targetUrl, title)

Drawbacks (directions for improvement):

  1. Only the albums on the first page of a category are downloaded (see the sketch after this list).
  2. Download progress is not saved, so an interrupted run starts over (also covered by the sketch below).
  3. That's about it. Feedback and corrections are welcome.
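For the first two points, one possible extension is sketched below. It reuses header, baseDomain and run() from the script above. Both the pagination selector (an <a> whose text is "下一页") and the progress file name downloaded.json are my own assumptions rather than anything taken from the site, so check them against the real markup before relying on this.

# Sketch: walk every page of a category and remember finished albums so an
# interrupted run can resume. ASSUMPTIONS: the category page links to the next
# page through an <a> labelled "下一页", and progress is kept in a local JSON
# file named downloaded.json; adjust both as needed.
import json
import os
import urllib.request
from bs4 import BeautifulSoup

DONE_FILE = "downloaded.json"  # hypothetical progress file

def load_done():
    if os.path.exists(DONE_FILE):
        with open(DONE_FILE, "r", encoding="utf-8") as f:
            return set(json.load(f))
    return set()

def save_done(done):
    with open(DONE_FILE, "w", encoding="utf-8") as f:
        json.dump(sorted(done), f, ensure_ascii=False)

def crawl_category(start_url):
    done = load_done()
    page_url = start_url
    while page_url:
        req = urllib.request.Request(url=page_url, headers=header)
        html = urllib.request.urlopen(req).read().decode("utf-8", "ignore")
        soup = BeautifulSoup(html, "html.parser")
        for a in soup.find_all("a", attrs={"class": "a1"}):
            href = a.get("href")
            if not href or href in done:
                continue  # skip albums that are already downloaded
            run(baseDomain + href, a["title"])
            done.add(href)
            save_done(done)  # record progress after every finished album
        # ASSUMED pagination link; verify against the site's real markup
        next_link = soup.find("a", string="下一页")
        page_url = baseDomain + next_link["href"] if next_link else None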