Single-threaded sequential crawling of images and their corresponding tags

import requests
from bs4 import BeautifulSoup
import re
import os
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # suppress the InsecureRequestWarning caused by verify=False


path = 'D:/httpswwwpexelscomzh-tw/'
def dwonload_img(page):
    global count
    url = f'https://www.pexels.com/zh-tw/?format=js&seed={page}&type='
    res = requests.get(url, headers={
        'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Referer': 'https://www.pexels.com/zh-tw/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36 Edg/99.0.1150.39',
        'Cookie': '_ga=GA1.2.953246023.1646290358; _hjSessionUser_171201=eyJpZCI6ImI4NzRmMjRiLWRlMjQtNWZlMi1hNjRkLWUyMDA2MjRmODFlYiIsImNyZWF0ZWQiOjE2NDYyOTA5MDI2NTcsImV4aXN0aW5nIjp0cnVlfQ==; locale=zh-TW; NEXT_LOCALE=zh-TW; _gid=GA1.2.572006961.1647307445; ab.storage.sessionId.5791d6db-4410-4ace-8814-12c903a548ba=%7B%22g%22%3A%223c77dd5a-d80d-5514-6e89-a515667aaba1%22%2C%22e%22%3A1647336874054%2C%22c%22%3A1647335074055%2C%22l%22%3A1647335074055%7D; ab.storage.deviceId.5791d6db-4410-4ace-8814-12c903a548ba=%7B%22g%22%3A%22b43b9c98-d28a-dfdb-1c65-c0f58a7b3689%22%2C%22c%22%3A1646290357434%2C%22l%22%3A1647335074056%7D; _gat=1; __cf_bm=MtqVbgDKWWQPM0LD5MJ2yjHVLCK1HddcW169X4OgWPQ-1647335087-0-AZcu7uQ0AlxdR8R5ANaYX7c9fCDbKBKL4XGoxTSgYCC0zon2SKUWtTvwSNBk4TyzHLrRW0TFq2RYZY42JOW2sbMjYFx3eMvD2mZc4zNwlDH9HAaGN9/HdaiOuvGR0+ClhwtVkqHRgkLtH4jt2DpznUh7yzGGojmH8CG5MBNuWrny'
        }, proxies={'http': 'http://127.0.0.1:18888', 'https': 'http://127.0.0.1:18888'}, verify=False)  # traffic goes through the local Fiddler proxy, hence verify=False

    # extract the image URLs from the JS response
    text = res.text
    print(res.status_code)
    all_url = re.findall("data-big-src=\\\\\"(http.+?)\"", text)  # regex match against the escaped HTML embedded in the JS response

    # download each image and its tags
    for img in all_url:
        pattern = re.compile(r'\d+')
        id = pattern.findall(img)[0]  # the first run of digits in the image URL is the photo id
        str_id = str(id)
        dwonload_url = f'https://images.pexels.com/photos/{str_id}/pexels-photo-{str_id}.jpeg'
        print(dwonload_url)
        text_label_url = f'https://www.pexels.com/zh-tw/medium/below-the-fold-html/{str_id}'
        print(text_label_url)
        if not os.path.isdir(path + str(count)):
            os.mkdir(path + str(count))

        document_path = path + str(count)
        pic_path = document_path + '/' + str(count) + '.jpg'   # '/' is used here to join the path components
        pic_path1 = document_path + '/' + str(count) + '.txt'  # same for the tag file

        txt_res = requests.get(text_label_url, headers={
            'Accept': '*/*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'Referer': 'https://www.pexels.com/zh-tw/photo/10895238/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.46',
            'Cookie': '_ga=GA1.2.953246023.1646290358; _hjSessionUser_171201=eyJpZCI6ImI4NzRmMjRiLWRlMjQtNWZlMi1hNjRkLWUyMDA2MjRmODFlYiIsImNyZWF0ZWQiOjE2NDYyOTA5MDI2NTcsImV4aXN0aW5nIjp0cnVlfQ==; locale=zh-TW; NEXT_LOCALE=zh-TW; _gid=GA1.2.2081165861.1647910986; _hjDonePolls=789223; ab.storage.sessionId.5791d6db-4410-4ace-8814-12c903a548ba=%7B%22g%22%3A%22bebb80c0-1ee3-1ccf-c271-6ac41bcceb8e%22%2C%22e%22%3A1647930602774%2C%22c%22%3A1647928802775%2C%22l%22%3A1647928802775%7D; ab.storage.deviceId.5791d6db-4410-4ace-8814-12c903a548ba=%7B%22g%22%3A%22b43b9c98-d28a-dfdb-1c65-c0f58a7b3689%22%2C%22c%22%3A1646290357434%2C%22l%22%3A1647928802776%7D; __cf_bm=yrbilVkKGf38kUC_5_2l6Ag44u66f.1cokHQBcZVrjY-1647928814-0-ATkWmZ0XheVsSf/yAg/ExhtPd326lbv/bitMIprXmNYNZeRBtiYo8VPCiW3Hrm9fnTknvr47y6cXCRw9wTNabQ7xw6y4vP4Z/c4Ur/8NzxbcIcCS1FoLW3Xw2wqaxYQa8LEINWBcno32yCSWK9h6RZqCcFq3dAkeAMoIo1poSFTZ; _gat=1'
        }, proxies={'http': 'http://127.0.0.1:18888', 'https': 'http://127.0.0.1:18888'}, verify=False)

        soup = BeautifulSoup(txt_res.content, 'html.parser')
        body = soup.find('ul', attrs={'class': 'photo-page__related-tags__container'})
        flag = 1
        if body is None:  # some photos have no related-tag list at all
            flag = 0
        if flag == 1:
            fp = open(pic_path1, 'w', encoding='utf-8')  # tags are plain text, so write them in text mode
            for li in body.find_all('a', attrs={'class': 'rd__tag'}):
                fp.write(li.text)
                fp.write('\n')
            fp.close()

        byte = requests.get(dwonload_url, headers={
            'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://www.pexels.com/zh-tw/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36 Edg/99.0.1150.39',
            'Cookie': '_ga=GA1.2.953246023.1646290358; _hjSessionUser_171201=eyJpZCI6ImI4NzRmMjRiLWRlMjQtNWZlMi1hNjRkLWUyMDA2MjRmODFlYiIsImNyZWF0ZWQiOjE2NDYyOTA5MDI2NTcsImV4aXN0aW5nIjp0cnVlfQ==; locale=zh-TW; NEXT_LOCALE=zh-TW; _gid=GA1.2.572006961.1647307445; ab.storage.sessionId.5791d6db-4410-4ace-8814-12c903a548ba=%7B%22g%22%3A%223c77dd5a-d80d-5514-6e89-a515667aaba1%22%2C%22e%22%3A1647336874054%2C%22c%22%3A1647335074055%2C%22l%22%3A1647335074055%7D; ab.storage.deviceId.5791d6db-4410-4ace-8814-12c903a548ba=%7B%22g%22%3A%22b43b9c98-d28a-dfdb-1c65-c0f58a7b3689%22%2C%22c%22%3A1646290357434%2C%22l%22%3A1647335074056%7D; _gat=1; __cf_bm=MtqVbgDKWWQPM0LD5MJ2yjHVLCK1HddcW169X4OgWPQ-1647335087-0-AZcu7uQ0AlxdR8R5ANaYX7c9fCDbKBKL4XGoxTSgYCC0zon2SKUWtTvwSNBk4TyzHLrRW0TFq2RYZY42JOW2sbMjYFx3eMvD2mZc4zNwlDH9HAaGN9/HdaiOuvGR0+ClhwtVkqHRgkLtH4jt2DpznUh7yzGGojmH8CG5MBNuWrny'
            }, proxies={'http': 'http://127.0.0.1:18888', 'https': 'http://127.0.0.1:18888'}, verify=False).content
        fp = open(pic_path, 'wb')  # images must be written in binary mode
        fp.write(byte)
        fp.close()
        count += 1
        print(count)

    # find the seed for the next page
    next_seed = re.findall(r"seed=.+?\\", text)  # the tricky part: the next seed is embedded in the escaped JS
    if len(next_seed) == 0:
        print('Total downloaded:', count)
        return

    next_seed = next_seed[0].strip('\\')  # strip the trailing escape backslash
    page_next = next_seed[5:]  # drop the leading 'seed='
    print(page_next)
    dwonload_img(page_next)


if __name__ == "__main__":
    page = '2022-03-21%2012%3A18%3A51%20UTC'  # initial seed: a URL-encoded UTC timestamp observed in the captured request
    count = 0
    dwonload_img(page)
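
The trickiest step above is pulling the next seed out of the escaped JS response. Below is a minimal worked example of that extraction, run against a made-up response fragment (the fragment is an assumption for illustration only, not captured from the real site):

```python
import re

# hypothetical fragment of the escaped-JS response (illustration only)
text = 'href=\\"/zh-tw/?format=js&seed=2022-03-21+12%3A18%3A51+UTC\\"'

matches = re.findall(r"seed=.+?\\", text)    # same pattern as above: everything up to the next backslash
if matches:
    seed = matches[0].strip('\\')[5:]        # drop the trailing '\' and the leading 'seed='
    print(seed)                              # -> 2022-03-21+12%3A18%3A51+UTC
```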

The target site is 免費圖庫相片 · Pexels (https://www.pexels.com/zh-tw/).

The request URL being crawled (the ?format=js&seed=... endpoint) was found by inspecting the site's traffic in Fiddler.
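
Because each page only reveals the seed of the next one, the recursion in dwonload_img can get fairly deep on a long crawl. A minimal iterative sketch of the same pagination (the headers, cookies and proxy settings shown earlier are omitted here, so this is an outline rather than a drop-in replacement):

```python
import re
import requests

def crawl_all(first_seed):
    seed = first_seed
    while seed:
        res = requests.get(f'https://www.pexels.com/zh-tw/?format=js&seed={seed}&type=')
        text = res.text
        # ... download the photos on this page, as dwonload_img() does ...
        nxt = re.findall(r"seed=.+?\\", text)
        seed = nxt[0].strip('\\')[5:] if nxt else None  # stop when no next seed is found
```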

While crawling, it turned out that some images have no tags.

Figure 1 (the tags are not visible on the page)

Figure 2 (page source of a photo without tags)

Figure 3 (page source of a photo with tags)

A flag is therefore used so that the code only grabs the tags when they exist, avoiding the case where the ul cannot be found:

body = soup.find('ul', attrs={'class': 'photo-page__related-tags__container'})
flag = 1
if body is None:  # some photos have no related-tag list at all
    flag = 0
if flag == 1:
    fp = open(pic_path1, 'w', encoding='utf-8')  # tags are plain text, so write them in text mode
    for li in body.find_all('a', attrs={'class': 'rd__tag'}):
        fp.write(li.text)
        fp.write('\n')
    fp.close()
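
The same guard can also be wrapped into a small helper that returns the tag list for one photo id. This is just a sketch; the function name get_tags and the bare request (without the headers and cookies shown earlier) are assumptions for illustration:

```python
import requests
from bs4 import BeautifulSoup

def get_tags(photo_id):
    """Return the related-tag texts for one photo, or [] if it has none."""
    url = f'https://www.pexels.com/zh-tw/medium/below-the-fold-html/{photo_id}'
    res = requests.get(url)  # a real request would need the headers/cookies used above
    soup = BeautifulSoup(res.content, 'html.parser')
    ul = soup.find('ul', attrs={'class': 'photo-page__related-tags__container'})
    if ul is None:           # photo without tags: nothing to write
        return []
    return [a.text for a in ul.find_all('a', attrs={'class': 'rd__tag'})]
```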
