[Web Scraper] Scraping the Jianshu Homepage Listing
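The script below crawls a paginated Jianshu article listing: each page is parsed with lxml XPath to pull out every entry's author, publication time, title, summary, view/comment/like counts and reward amount, the records are written to a local MongoDB collection, and a multiprocessing pool of four workers fetches the pages in parallel.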

import requests
from lxml import etree
import pymongo
from multiprocessing import Pool

# Connect to a local MongoDB instance; scraped records go into the
# 'jianshu_shouye' collection of the 'mydb' database.
client = pymongo.MongoClient('localhost', 27017)
mydb = client['mydb']
jianshu_shouye = mydb['jianshu_shouye']

def get_jianshu_info(url):
    """Fetch one listing page and store every article entry found on it."""
    html = requests.get(url)
    selector = etree.HTML(html.text)
    # Each article on the page is an <li> inside <ul class="note-list">.
    infos = selector.xpath('//ul[@class="note-list"]/li')
    for info in infos:
        try:
            author = info.xpath('div/div[1]/div/a/text()')[0]
            time = info.xpath('div/div[1]/div/span/@data-shared-at')[0]
            title = info.xpath('div/a/text()')[0]
            content = info.xpath('div/p/text()')[0].strip()
            # Index 1 is used because the first text node of these links is
            # just the whitespace around the icon.
            view = info.xpath('div/div[2]/a[1]/text()')[1].strip()
            comment = info.xpath('div/div[2]/a[2]/text()')[1].strip()
            like = info.xpath('div/div[2]/span[1]/text()')[0].strip()
            # Not every article has been rewarded, so this node may be absent.
            rewards = info.xpath('div/div[2]/span[2]/text()')
            if len(rewards) == 0:
                reward = '无'  # no reward
            else:
                reward = rewards[0].strip()
            data = {
                'author': author,
                'time': time,
                'title': title,
                'content': content,
                'view': view,
                'comment': comment,
                'like': like,
                'reward': reward
            }
            jianshu_shouye.insert_one(data)
        except IndexError:
            # Skip entries whose markup does not match the XPaths above.
            pass

if __name__ == '__main__':
    urls = ['http://www.jianshu.com/c/bDHhpK?order_by=commented_at&page={}'.format(str(i)) for i in range(1, 10001)]
    pool = Pool(processes=4)
    pool.map(get_jianshu_info, urls)
    # Release the worker processes once every page has been handled.
    pool.close()
    pool.join()
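
Two caveats that are not part of the original script: many sites, Jianshu possibly included, refuse requests that carry the default requests User-Agent, and the code above never checks the HTTP status before parsing. The sketch below is one way to harden the fetch step; fetch_page and the HEADERS string are illustrative additions rather than anything taken from the post.

import requests
from lxml import etree

# Hypothetical helper, not in the original script: send a browser-like
# User-Agent and fail loudly on HTTP errors instead of parsing an error page.
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

def fetch_page(url):
    res = requests.get(url, headers=HEADERS, timeout=10)
    res.raise_for_status()
    return etree.HTML(res.text)

With that in place, get_jianshu_info would build its selector from fetch_page(url). It is also worth running a single URL (page=1) end to end before launching the pool over all 10,000 pages, and pymongo's documentation advises creating the MongoClient after worker processes are forked, so moving the client setup into the workers is the safer pattern when using Pool.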

 
