Scraping a specified category of images with Python

Method 1:

Reposted from: https://zhuanlan.zhihu.com/p/367325899

# -*- coding: utf-8 -*-
"""
 Created on 2021/4/19 11:47
 Filename   : spider_image_baidu.py
 Author     : Taosy
 Zhihu      : https://www.zhihu.com/people/1105936347
 Github     : https://github.com/AFei19911012
 Description: Spider - get images from baidu
"""

import requests
import os
import re


def get_images_from_baidu(keyword, page_num, save_dir):
    # UA spoofing: make the crawler's request look like it comes from a browser
    # Wrap the User-Agent in a dict
    # [(right-click the page → Inspect) or F12] → [Network] → [Ctrl+R] → pick an entry on the left, then look under [Request Headers] on the right
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
    # Request URL
    url = 'https://image.baidu.com/search/acjson?'
    n = 0
    for pn in range(0, 30 * page_num, 30):
        # Request parameters
        param = {'tn': 'resultjson_com',
                 # 'logid': '7603311155072595725',
                 'ipn': 'rj',
                 'ct': 201326592,
                 'is': '',
                 'fp': 'result',
                 'queryWord': keyword,
                 'cl': 2,
                 'lm': -1,
                 'ie': 'utf-8',
                 'oe': 'utf-8',
                 'adpicid': '',
                 'st': -1,
                 'z': '',
                 'ic': '',
                 'hd': '',
                 'latest': '',
                 'copyright': '',
                 'word': keyword,
                 's': '',
                 'se': '',
                 'tab': '',
                 'width': '',
                 'height': '',
                 'face': 0,
                 'istype': 2,
                 'qc': '',
                 'nc': '1',
                 'fr': '',
                 'expermode': '',
                 'force': '',
                 'cg': '',    # undocumented parameter, but required
                 'pn': pn,    # result offset: 30-60-90
                 'rn': '30',  # 30 results per page
                 'gsm': '1e',
                 '1618827096642': ''
                 }
        request = requests.get(url=url, headers=header, params=param)
        if request.status_code == 200:
            print('Request success.')
        request.encoding = 'utf-8'
        # Extract the image URLs with a regex
        html = request.text
        image_url_list = re.findall('"thumbURL":"(.*?)",', html, re.S)
        print(image_url_list)
        # # Alternative: parse the JSON response instead (a standalone sketch follows this script)
        # request_dict = request.json()
        # info_list = request_dict['data']
        # # The last element is an extra empty entry; drop it
        # info_list.pop()
        # image_url_list = []
        # for info in info_list:
        #     image_url_list.append(info['thumbURL'])

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        for image_url in image_url_list:
            image_data = requests.get(url=image_url, headers=header).content
            with open(os.path.join(save_dir, f'{n:06d}.jpg'), 'wb') as fp:
                fp.write(image_data)
            n = n + 1


if __name__ == '__main__':
    keyword = '猫'  # search keyword (Chinese for "cat")
    save_dir = keyword
    page_num = 2
    get_images_from_baidu(keyword, page_num, save_dir)
    print('Get images finished.')
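
The commented-out lines in `get_images_from_baidu` hint at a second way to pull the thumbnail URLs: parse the response as JSON instead of running a regex over the raw text. Below is a minimal standalone sketch of that route; the `extract_thumb_urls` helper is not part of the original script, and it assumes the response body parses as JSON with a `data` list whose entries carry a `thumbURL` key.

```python
# Minimal sketch of the JSON-based extraction hinted at in the commented-out block above.
# Assumes the Baidu response parses as JSON and its 'data' list entries carry 'thumbURL'.
def extract_thumb_urls(response):
    info_list = response.json().get('data', [])
    urls = []
    for info in info_list:
        # The trailing element is typically an empty placeholder without 'thumbURL'
        if 'thumbURL' in info:
            urls.append(info['thumbURL'])
    return urls
```

Swapping this in for the regex leaves the rest of the download loop unchanged.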

Method 2:

import requests
import time
import re
import faker
faker = faker.Faker('zh_CN')

class GetPage():
    def __init__(self, url, path, circulate_time):
        headers = {
            'User-Agent' : str(faker.chrome())
        }
        self.headers = headers
        self.url = url
        self.path = path
        self.circulate_time = circulate_time

    def get_shufa(self):
        url = self.url
        # Pre-compile the regular expression
        obj_1 = re.compile(r'&pn=(?P<param>\d+)&', re.S)
        # Pull out the original pn parameter
        old_param = int(obj_1.findall(url)[0])
        # Keep the full original pn fragment as the piece to replace
        old_change_param = f'&pn={old_param}&'
        # Reset the pn parameter to 0 so paging starts from the beginning
        start_param = f'&pn=0&'
        # Do the replacement
        new_url = url.replace(old_change_param, start_param)
        param_num = 0
        url_list = []
        for i in range(self.circulate_time):
            try:
                resp_json = requests.get(url = new_url, headers = self.headers).json()
                for j in range(30):
                    url_list.append(resp_json['data'][j]['thumbURL'])
                # Swap in the next pn value for the following page
                old_param = f'&pn={param_num}&'
                param_num += 30
                new_param = f'&pn={param_num}&'
                new_url = new_url.replace(old_param, new_param)
                print(f'Finished request for page {i+1}')
                time.sleep(1)
            except KeyError:
                print(f'Only {i} page(s) could be fetched')
                break

        page_num = 1
        url_list = list(set(url_list))
        for page_url in url_list:
            resp_content = requests.get(url = page_url, headers = self.headers).content
            with open(f'{self.path}/{page_num}.jpg', 'wb') as file:
                file.write(resp_content)
                print(f'Downloading image {page_num}/{len(url_list)}')
                page_num += 1
                time.sleep(0.8)


if __name__ == '__main__':
    baidu_url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=10676196996033214004&ipn=rj&ct=201326592&is=&fp=result&fr=&word=%E8%90%BD%E7%9F%B3&queryWord=%E8%90%BD%E7%9F%B3&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&expermode=&nojc=&isAsync=&pn=150&rn=30&gsm=9600000000000096&1666077668271='
    save_path = 'D:/Workspace/local_py/落石'
    circulate_time = int(input('Enter the number of pages to crawl (30 images per page): '))
    a = GetPage(url=baidu_url, path=save_path, circulate_time=circulate_time)
    a.get_shufa()
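
Method 2 takes the full request URL copied from the browser, so the search keyword is baked into the word / queryWord query parameters (here URL-encoded as %E8%90%BD%E7%9F%B3, i.e. 落石). To point the same class at a different keyword, one option, sketched below with the standard library's urllib.parse, is to rewrite those two parameters before constructing GetPage; the `swap_keyword` helper is an illustration, not part of the original script.

```python
from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode


# Hypothetical helper (not in the original script): swap the search keyword in a
# Baidu image-search URL by rewriting its 'word' and 'queryWord' query parameters.
def swap_keyword(url, keyword):
    parts = urlsplit(url)
    query = parse_qs(parts.query, keep_blank_values=True)
    query['word'] = [keyword]
    query['queryWord'] = [keyword]
    new_query = urlencode(query, doseq=True)
    return urlunsplit((parts.scheme, parts.netloc, parts.path, new_query, parts.fragment))


# Example: reuse the hard-coded baidu_url above for a different keyword
# cat_url = swap_keyword(baidu_url, '猫')
```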

Scraping annual reports from cninfo (the disclosure website designated by the CSRC for listed companies) with Python usually takes a few steps:

1. **Install the required libraries**: install `requests` to send HTTP requests and `BeautifulSoup` to parse HTML. Both can be installed with pip:

   ```
   pip install requests beautifulsoup4
   ```

2. **Work out the URL structure**: inspect the target company's annual-report page; reports are generally grouped by year, for example https://www.cninfo.com.cn/companys/hisAnnounce?cmpid=XXX (replace XXX with the company's securities code).

3. **Write the crawler**: write a function that takes a company code, builds the report-list URL, and downloads each annual report. Example (the CSS selector depends on the actual page structure and may need adjusting):

   ```python
   import requests
   from bs4 import BeautifulSoup


   def get_annual_report(url_template, company_code):
       url = url_template.format(company_code)
       # Send the GET request
       response = requests.get(url)
       response.raise_for_status()  # Raise an error if the request failed
       # Parse the HTML
       soup = BeautifulSoup(response.text, 'html.parser')
       # Find the annual-report links (depends on the page structure, may need adjusting)
       annual_report_links = soup.select('.annual-report-link')  # assumes the class is '.annual-report-link'
       for link in annual_report_links:
           report_year = link.text.split(' ')[0]  # Extract the year
           report_url = link['href']              # Get the PDF link
           download_pdf(report_url, company_code, report_year)


   def download_pdf(pdf_url, company_code, report_year):
       with requests.get(pdf_url, stream=True) as r:
           r.raise_for_status()
           filename = f'{company_code}_{report_year}.pdf'  # Build the file name
           with open(filename, 'wb') as f:
               for chunk in r.iter_content(chunk_size=1024):
                   if chunk:  # filter out keep-alive chunks
                       f.write(chunk)


   # Call the function with a company code
   get_annual_report("https://www.cninfo.com.cn/companys/hisAnnounce?cmpid={}&annNo=&tabName=year", 'your_company_code')
   ```