"""爬取斗图表情包 (crawl Doutu emoticon packs from doutula.com).

用于技术交流和代码保存 (for technical exchange and code preservation).
"""

import requests,os
import re,random
import json,time

class DoutuCrawl:
    """Crawl emoticon images from doutula.com search results.

    Workflow: ``concatLink()`` builds one URL per result page, ``GetImage()``
    fetches each page, downloads every image via ``savaimg()``, and collects
    metadata records; ``GetHtml()`` performs the HTTP GET and extracts the
    page count.
    """

    def __init__(self, keyword=None, type='photo', more=1, page=2):
        # Base site URL; search URLs are derived from it in concatLink().
        self.locationLink = 'https://www.doutula.com/'
        # Search keyword (may be None until set by the caller).
        self.keyword = keyword
        # Keep the remaining constructor arguments instead of silently
        # discarding them as the original did. NOTE: the URL-building code
        # below still hard-codes type=photo&more=1, matching the original
        # behavior; these attributes are stored for future use.
        self.type = type
        self.more = more
        self.page = page

    def concatLink(self):
        """Build the search URL for every result page and crawl them all.

        Determines the page count from page 1, then hands the full URL list
        to GetImage(). Does nothing if the first request fails.
        """
        first_url = (self.locationLink + 'search?type=photo&more=1&keyword='
                     + self.keyword + '&page=1')
        self.Htmlurl = first_url  # kept for compatibility with the original attribute
        result = self.GetHtml(first_url)
        if result is None:
            # Network or parse failure — the original crashed unpacking None here.
            return
        _, max_page = result
        # Result pages are 1-based. The original used range(Maxpage), which
        # requested a bogus page=0 and skipped the final page.
        infoList = [
            self.locationLink + 'search?type=photo&more=1&keyword='
            + self.keyword + '&page=' + str(page_no)
            for page_no in range(1, max_page + 1)
        ]
        self.GetImage(infoList)

    def GetImage(self, infoList):
        """Fetch each search page, save its images, and collect metadata.

        :param infoList: list of search-result page URLs to process.
        :returns: list of dicts with keys 'coverimg', 'title', 'updata'.
        """
        imglist = []
        titleList = []
        timelist = []
        tmp_list = []
        # Hoist the patterns out of the loop — they are loop-invariant.
        img_pat = re.compile('data-original="(.*?)"')
        title_pat = re.compile('<p style="display: none">(.*?)</p>')
        # The original iterated infoList[i+1], skipping the first URL and
        # raising IndexError on the last one (masked by a bare except).
        for page_url in infoList:
            result = self.GetHtml(page_url)
            if result is None:
                print("error")
                continue
            html = result[0]
            for img_url in img_pat.findall(html):
                imglist.append(img_url)
                self.savaimg(img_url)
            today = time.strftime("%Y-%m-%d ", time.localtime())
            for title in title_pat.findall(html):
                titleList.append(title)
                timelist.append(today)
        # Assemble the records once, after all pages. The original re-zipped
        # the cumulative lists inside the loop, so tmp_list accumulated
        # duplicate entries for every earlier page on each iteration.
        for img_url, title, stamp in zip(imglist, titleList, timelist):
            tmp_list.append({'coverimg': img_url, 'title': title, 'updata': stamp})
        print(tmp_list)
        return tmp_list

    def savaimg(self, imgurl):
        """Download one image and save it under ./Enrichment/ by its basename.

        :param imgurl: absolute URL of the image to download.
        """
        # os.path.join instead of hard-coded "\\" so this works off Windows too.
        general = os.path.join(os.getcwd(), "Enrichment")
        # exist_ok avoids the exists()/mkdir() race of the original.
        os.makedirs(general, exist_ok=True)
        target = os.path.join(general, imgurl.split("/")[-1])
        response = requests.get(imgurl)
        # 'with' closes the file; the original's explicit f.close() was redundant.
        with open(target, 'wb') as f:
            f.write(response.content)

    def GetHtml(self, Htmlurl):
        """GET a search page and extract the last page number from its pager.

        :param Htmlurl: URL of the search-result page to fetch.
        :returns: (html_text, max_page) on success, None on any failure
                  (the original also returned None from its bare except).
        """
        headers = {
            # The original dict listed 'User-Agent' twice; Python keeps only
            # the last value, so a single entry is behaviorally identical.
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
            'Host': 'www.doutula.com',
            'Upgrade-Insecure-Requests': '1',
        }
        # NOTE(review): hard-coded public proxies, almost certainly stale —
        # confirm they are still wanted, or fetch directly without proxies.
        proxies = ["115.218.222.64:9000", "120.194.18.90:81", "123.160.74.11:9999"]
        try:
            session = requests.session()
            response = session.get(
                Htmlurl,
                timeout=30,
                proxies={'http': random.choice(proxies)},
                headers=headers,
            )
            response.encoding = 'utf-8'
            html = response.text
            # Last pager entry holds the highest page number, e.g. '...">12'.
            pager_entries = re.findall(
                r'<li class="page-item"><a class="page-link(.*?)</a></li>', html)
            max_page = int(re.search(r'>(\d+)', pager_entries[-1]).group(1))
            return html, max_page
        except (requests.RequestException, IndexError, AttributeError, ValueError):
            # Narrowed from the original bare except; same None result.
            print("Exceptional error occurred")
            return None


if __name__ == "__main__":
    # Guard so importing this module does not trigger a crawl; behavior when
    # run as a script is unchanged.
    keyword = '傻逼'
    Doutu = DoutuCrawl(keyword)
    Doutu.concatLink()


# NOTE(review): removed non-code residue here — CSDN blog-page boilerplate
# (comment box, red-packet and payment UI text) that was accidentally captured
# when this script was copied from the blog post. It was never part of the code.