Web Scraping -- A Simple Wrapper

Code

The BaseSpider class below bundles a shared requests.Session with common scraping helpers: a random Chrome User-Agent generator, an MD5 helper, timestamp formatting, text/image saving, and a parse method that issues GET/POST requests and can optionally return an lxml element tree.

# -*- coding: utf-8 -*-
# @Author   : zbz

import hashlib
import time
import random

import requests
from lxml import etree


class BaseSpider:
    """Small scraping base class: a shared requests.Session plus common helpers."""

    def __init__(self):
        self.session = requests.Session()

    def get_md5(self, s: str) -> str:
        # Hex MD5 digest of a UTF-8 encoded string.
        m = hashlib.md5()
        m.update(s.encode("utf8"))
        return m.hexdigest()

    def get_ua(self) -> str:
        # Build a randomized desktop Chrome User-Agent string.
        major = random.randint(55, 62)
        build = random.randint(0, 3200)
        patch = random.randint(0, 140)
        os_type = [
            '(Windows NT 6.1; WOW64)', '(Windows NT 10.0; WOW64)', '(X11; Linux x86_64)',
            '(Macintosh; Intel Mac OS X 10_12_6)'
        ]
        chrome_version = 'Chrome/{}.0.{}.{}'.format(major, build, patch)
        ua = ' '.join(
            ['Mozilla/5.0', random.choice(os_type), 'AppleWebKit/537.36',
             '(KHTML, like Gecko)', chrome_version, 'Safari/537.36']
        )
        return ua

    def ua_heads(self) -> dict:
        headers = {
            "User-Agent": self.get_ua()
        }
        return headers

    def dtime(self, tsp=None) -> str:
        # Format a Unix timestamp (defaults to now) as 'YYYY-MM-DD HH:MM:SS'.
        tsp = time.time() if tsp is None else tsp
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(tsp))

    def to_file(self, content, path, encoding=None):
        # Write text content to a file, defaulting to UTF-8.
        encoding = "utf8" if encoding is None else encoding
        with open(path, "w", encoding=encoding) as f:
            f.write(content)

    def download_img(self, url, path=None):
        # Save an image to disk; without a path, fall back to a random
        # numeric filename (note: collisions are possible).
        if path is None:
            path = "{}{}{}{}.png".format(*[random.randint(1, 100) for _ in range(4)])
        resp = self.parse(url)
        if not resp:
            return False
        with open(path, "wb") as f:
            f.write(resp.content)
        return True

    def parse(self, url, method="GET", headers=None, params=None, cookies=None,
              data=None, json=None, allow_redirects=True, timeout=3, xpath=False):
        # Issue a GET/POST request through the shared session; with xpath=True,
        # return an lxml element tree instead of the raw Response. Returns
        # None when the request raises or the method is unsupported.
        headers = self.ua_heads() if headers is None else headers
        if method == "GET":
            try:
                resp = self.session.get(url, headers=headers, params=params, cookies=cookies,
                                        allow_redirects=allow_redirects, timeout=timeout)
                return etree.HTML(resp.text) if xpath else resp
            except Exception as e:
                print("ERROR ==> {}".format(e))
        elif method == "POST":
            try:
                resp = self.session.post(url, headers=headers, params=params, cookies=cookies,
                                         allow_redirects=allow_redirects, json=json, data=data, timeout=timeout)
                return etree.HTML(resp.text) if xpath else resp
            except Exception as e:
                print("ERROR ==> {}".format(e))

A basic approach for a Scrapy Weibo spider:

1. Log in to Weibo.
2. Search Weibo by keyword and fetch the list of posts.
3. Iterate over the list and extract each post's fields: post ID, content, publish time, like count, repost count, comment count, author info, and so on.
4. If there is a next page, crawl it and repeat steps 2-3.
5. Save the extracted posts to a local or remote database (see the pipeline sketch after the example below).

Example code:

1. Create a Scrapy project from the command line:

scrapy startproject weibo

2. Create a spider file named weibospider.py under weibo/spiders:

import scrapy
from scrapy.http import Request


class WeiboSpider(scrapy.Spider):
    name = "weibo"
    allowed_domains = ["weibo.com"]
    start_urls = ["https://weibo.com/"]

    def start_requests(self):
        login_url = 'https://login.weibo.cn/login/'
        yield Request(url=login_url, callback=self.login)

    def login(self, response):
        # Implement the Weibo login logic here
        # ...
        # After logging in, start crawling with parse
        yield Request(url=self.start_urls[0], callback=self.parse)

    def parse(self, response):
        # Implement the keyword search here and build weibo_list
        # from the search results page
        # ...
        # Iterate over the posts and extract each one's fields
        for weibo in weibo_list:
            weibo_id = weibo.get('id')
            weibo_content = weibo.get('content')
            publish_time = weibo.get('publish_time')
            likes = weibo.get('likes')
            reposts = weibo.get('reposts')
            comments = weibo.get('comments')
            author = weibo.get('author')
            # Save the extracted post to a local or remote database
            # ...
        # If there is a next page, keep crawling
        next_page = response.xpath('//a[text()="下一页"]/@href').extract_first()
        if next_page:
            yield Request(url=next_page, callback=self.parse)

3. Run the spider from the command line:

scrapy crawl weibo

This is only a skeleton of a Scrapy Weibo spider; the elided parts need to be adapted and filled in for the actual site.
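
Step 5 is elided in the spider itself; below is a minimal sketch of it as a Scrapy item pipeline, assuming the spider yields dicts with the fields extracted above. The pipeline name, the weibo.db file, and the table schema are illustrative assumptions, not part of the original example:

# weibo/pipelines.py
import sqlite3


class WeiboSqlitePipeline:
    # Minimal sketch: persist yielded post dicts into a local SQLite file.
    def open_spider(self, spider):
        self.conn = sqlite3.connect("weibo.db")
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS weibo ("
            "id TEXT PRIMARY KEY, content TEXT, publish_time TEXT, "
            "likes INTEGER, reposts INTEGER, comments INTEGER, author TEXT)"
        )

    def process_item(self, item, spider):
        # Upsert by post ID so re-crawls do not duplicate rows.
        self.conn.execute(
            "INSERT OR REPLACE INTO weibo VALUES (?, ?, ?, ?, ?, ?, ?)",
            (item.get('id'), item.get('content'), item.get('publish_time'),
             item.get('likes'), item.get('reposts'), item.get('comments'),
             item.get('author')),
        )
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.conn.close()

Enable it in weibo/settings.py with ITEM_PIPELINES = {"weibo.pipelines.WeiboSqlitePipeline": 300}, and have parse() yield the extracted dict instead of only assigning local variables.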
