# 豆瓣爬虫练习 (Douban book-scraper practice)

import csv
import os

from lxml import etree
import  requests

# HTTP request headers: a desktop-browser User-Agent so Douban does not
# reject the requests as coming from a bot.
header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
}


def get(url):
    """Scrape one page of the Douban book Top-250 list.

    For each book on the page, collects title, author line, rating-count
    text, detail-page link and cover-image URL; downloads the cover and the
    short introduction into ``C:\\豆瓣\\<title>\\``; and appends every row
    to ``douban.csv``.

    Args:
        url: URL of one Top-250 listing page (``...top250?start=N``).

    Returns:
        List of dicts, one per book, with the scraped fields.
    """
    books = []  # renamed from `list`, which shadowed the builtin
    res = requests.get(url, headers=header)
    html = etree.HTML(res.content)
    titles = html.xpath('//div[@class="pl2"]/a/@title')
    authors = html.xpath('//p[@class="pl"]/text()')
    evaluates = html.xpath('//span[@class="pl"]/text()')
    links = html.xpath('//a[@class="nbg"]/@href')
    jpgs = html.xpath('//a[@class="nbg"]/img/@src')
    for title, author, evaluate, link, jpg in zip(titles, authors, evaluates, links, jpgs):
        # Strip the newlines/spaces Douban puts around the rating count.
        count = evaluate.replace('\n', '').replace(' ', '')
        book = {
            '书名': title,
            '作者': author,
            '评论数': count,
            '链接': link,
            '图片地址': jpg,
        }
        books.append(book)
        book_dir = "C:\\豆瓣\\" + title
        # exist_ok avoids FileExistsError when the script is re-run.
        os.makedirs(book_dir, exist_ok=True)
        pic = requests.get(jpg, headers=header)
        detail = requests.get(link, headers=header)
        detail_html = etree.HTML(detail.text)
        jianjie = detail_html.xpath('//div[@class="intro"]//text()')
        # 'wb' (not 'ab') so re-running replaces the image instead of
        # appending bytes onto an existing, now-corrupt file.
        with open(book_dir + "\\" + title + '.jpg', 'wb') as f:
            for chunk in pic.iter_content(chunk_size=1000):
                if chunk:
                    f.write(chunk)
        # Guard the index instead of catching UnicodeEncodeError: the file
        # is opened as utf-8, so the realistic failure was IndexError when
        # the detail page has no intro text.
        if len(jianjie) > 1:
            with open(book_dir + "\\" + title + '.txt', 'a', encoding='utf-8') as f:
                f.write(jianjie[1])
    # Write the CSV once per page, after the loop. The original opened it
    # with 'w' inside the loop (overwriting itself every iteration) and
    # used csv.writer.writerow(dict), which writes only the dict KEYS.
    # DictWriter in append mode keeps rows across pages; utf-8-sig so
    # Excel displays the Chinese headers correctly.
    fieldnames = ['书名', '作者', '评论数', '链接', '图片地址']
    need_header = not os.path.exists("douban.csv")
    with open("douban.csv", 'a', newline="", encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if need_header:
            writer.writeheader()
        writer.writerows(books)
    return books


# The Top 250 is paginated 25 books per page (start = 0, 25, ..., 225).
# range(0, 250, 25) visits every page; the original range(0, 25, 25)
# produced only [0] and therefore scraped just the first page.
if __name__ == "__main__":
    for n in range(0, 250, 25):
        url = "https://book.douban.com/top250?start=" + str(n)
        a = get(url)


评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值