python贴吧爬虫 (Python Baidu Tieba spider)

# !usr/bin/env python

# -*- coding:utf-8 _*-

"""

@Author:czk

@File:tieba_spider.py

@Time:2020/6/6 21:31

@Motto:积极向上

"""

import requests

class TiebaSpider:
    """Crawl the list pages of one Baidu Tieba forum and save each page as an HTML file."""

    def __init__(self, tieba_name):
        """
        :param tieba_name: name of the tieba (forum) to crawl, e.g. "LOL"
        """
        self.tieba_name = tieba_name
        # pn is the thread offset: Tieba shows 50 threads per page
        self.url_temp = 'https://tieba.baidu.com/f?kw=' + tieba_name + '&ie=utf-8&pn={}'
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}

    def get_url_list(self, page_count=1000):
        """Build the list of list-page URLs.

        :param page_count: number of pages to generate (default 1000,
            matching the original hard-coded value)
        :return: list of page URLs, page i at offset pn=i*50
        """
        return [self.url_temp.format(i * 50) for i in range(page_count)]

    def parse_url(self, url, page_num):
        """Fetch one page and return its decoded HTML text.

        :param url: page URL to fetch
        :param page_num: 1-based page number (used only for progress output)
        :return: response body decoded as text (utf-8 by default)
        """
        print('---start{}---'.format(page_num))
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def save_html(self, html_str, page_num):
        """Write one page's HTML to a local file named '<tieba>-第N页.html'.

        :param html_str: HTML text to save
        :param page_num: 1-based page number used in the file name
        """
        file_path = '{}-第{}页.html'.format(self.tieba_name, page_num)
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(html_str)

    def run(self):
        """Main flow: build the URL list, fetch every page, save each to disk."""
        # enumerate avoids the original O(n) list.index() lookup per iteration
        # (which made the whole loop O(n^2) over 1000 URLs)
        for page_num, url in enumerate(self.get_url_list(), start=1):
            html_str = self.parse_url(url, page_num)
            self.save_html(html_str, page_num)

if __name__ == '__main__':
    # Entry point: crawl the "LOL" tieba when executed as a script.
    tieba_spider = TiebaSpider("LOL")
    tieba_spider.run()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值