Python之某鱼爬虫

爬取某鱼代码

from selenium import  webdriver
import time

class DouyuSpider:
    """Selenium-based crawler for the Douyu live-stream directory.

    Opens https://www.douyu.com/directory/all and walks the listing page
    by page, scraping one dict of room metadata per listed stream.
    """

    def __init__(self):
        # Landing page listing all live rooms, paginated.
        self.start_url = "https://www.douyu.com/directory/all"
        self.driver = webdriver.Chrome()

    def get_content_list(self):
        """Scrape every room card on the currently loaded page.

        Returns:
            tuple: ``(content_list, next_url)`` where ``content_list`` is a
            list of dicts (one per room) and ``next_url`` is the "next page"
            WebElement, or ``None`` when there is no further page.
        """
        li_list = self.driver.find_elements_by_xpath(
            "//ul[@class='live-list-contentbox']/li")
        content_list = []
        for li in li_list:
            item = {}
            # BUGFIX: use find_element_by_xpath (singular) throughout — the
            # plural form returns a list, which has no .get_attribute()/.text.
            item["room_img"] = li.find_element_by_xpath(
                ".//span[@class='imgbox']/img").get_attribute("src")
            # BUGFIX: an <a> has no "src" attribute; the room title is in
            # its "title" attribute (verify against current page markup).
            item["room_title"] = li.find_element_by_xpath(
                "./a").get_attribute("title")
            # BUGFIX: "@class-'...'" was a typo for "@class='...'" — the
            # hyphen made the XPath predicate invalid.
            item["room_cate"] = li.find_element_by_xpath(
                ".//span[@class='tag ellipsis']").text
            item["anchor_name"] = li.find_element_by_xpath(
                ".//span[@class='dy-name ellipsis fl']").text
            item["watch_num"] = li.find_element_by_xpath(
                ".//span[@class='dy-num fr']").text
            print(item)
            content_list.append(item)
        # BUGFIX: the pager lookup and the return were inside the for loop,
        # so only the first room of each page was ever scraped. Hoisted out.
        next_url = self.driver.find_elements_by_xpath(
            "//a[@class='shark-pager-next']")
        next_url = next_url[0] if len(next_url) > 0 else None
        return content_list, next_url

    def save_content_list(self, content_list):
        """Persist the scraped rooms (not implemented yet)."""
        pass

    def run(self):
        """Main loop: load the start page, scrape it, follow pagination."""
        self.driver.get(self.start_url)

        content_list, next_url = self.get_content_list()

        self.save_content_list(content_list)

        # Keep clicking "next page" until the pager button disappears.
        while next_url is not None:
            next_url.click()
            # Crude wait for the next page to render before re-scraping.
            time.sleep(3)
            content_list, next_url = self.get_content_list()
            self.save_content_list(content_list)


if __name__ == '__main__':
    # Script entry point: build the spider and start crawling.
    spider = DouyuSpider()
    spider.run()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值