python爬虫(爬取任意贴吧网页(前一千页),并保存在本地)

这是自己照着打的第一个较为完整的爬虫

import requests

class Tieba_Spider:
    """Crawler that downloads the first 1000 list pages of a Baidu Tieba
    forum and saves each page as a local HTML file.

    Usage: Tieba_Spider("lol").run()
    """

    def __init__(self, tieba_name):
        # BUG FIX: the original defined `def int(self, ...)`. Python's
        # constructor must be named `__init__`; otherwise the class inherits
        # object's no-argument constructor and `Tieba_Spider("lol")` raises
        # "TypeError: object() takes no parameters".
        self.tieba_name = tieba_name
        # `pn` is the thread offset; each Tieba list page shows 50 threads.
        self.url_name = "https://tieba.baidu.com/f?kw=" + tieba_name + "&ie=utf-8&pn={}"
        # Browser-like User-Agent so the request is not rejected as a bot.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0"}

    # 1. Build the URL list for pages 1..1000 (pn = 0, 50, ..., 49950).
    def get_url_list(self):
        return [self.url_name.format(i * 50) for i in range(1000)]

    def pares_url(self, url):
        """Fetch one page and return its HTML as a str.

        BUG FIX: response.content is bytes, so it must be *decoded* to str;
        the original called .encode() on bytes, which raises AttributeError
        on Python 3.
        """
        print(url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def save_url(self, html_str, page_num):
        # Save one page's HTML to "<forum>----第N页.html" in the working dir.
        html_path = "{}----第{}页.html".format(self.tieba_name, page_num)
        with open(html_path, "w", encoding="utf-8") as f:
            f.write(html_str)

    def run(self):
        """Main flow: build URLs, fetch each page, save it locally."""
        # 1. build the URL list
        url_list = self.get_url_list()
        # 2. iterate, request, save. enumerate() replaces the original
        # url_list.index(url) lookup, which was an accidental O(n) scan
        # per page (O(n^2) overall) and breaks on duplicate URLs.
        for page_num, url in enumerate(url_list, start=1):
            html_str = self.pares_url(url)
            # 3. save to the local disk
            self.save_url(html_str, page_num)

# Script entry point.
# BUG FIX: the original tested `name == 'main'`; the module-name sentinel
# is the dunder pair __name__ / "__main__". Also replaced the full-width
# “smart” quotes (a SyntaxError) with ASCII quotes.
if __name__ == "__main__":
    tieba_spider = Tieba_Spider("lol")
    tieba_spider.run()
哪位大神帮帮我啊,出现下列错误
tieba_speider = Tieba_Speider(“lol”)
TypeError: object() takes no parameters

(解答:这个 TypeError 的原因是构造函数写成了 `def int(self, ...)`。Python 的构造函数必须命名为 `__init__`(前后各两条下划线),否则类只继承 object 的无参构造函数,传入 "lol" 时就会报 "object() takes no parameters"。另外注意:调用处把类名拼成了 `Tieba_Speider`,而类定义是 `Tieba_Spider`;代码中的中文引号 “” 也要改成英文引号 ",否则会报 SyntaxError。)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值