Crawler Practice: Scraping Hot-Commented Articles on Jianshu
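
The script below walks the collection's pages in "most recently commented" order, uses lxml XPath to pull each article's author, publish time, title, summary, view/comment/like counts, and reward amount, and inserts one document per article into a local MongoDB collection; a multiprocessing pool of four workers fetches the pages in parallel.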

jianshuwangarticle.py:

import requests
from lxml import etree
import pymongo
from multiprocessing import Pool

# Connect to the local MongoDB instance and select the target collection
client = pymongo.MongoClient('localhost', 27017)
mydb = client['mydb']
jianshu_shouye = mydb['jianshu_shouye']


def get_jianshu_info(url):
    # Fetch one listing page; a browser-style User-Agent is assumed here to
    # reduce the chance of the request being rejected
    headers = {'User-Agent': 'Mozilla/5.0'}
    html = requests.get(url, headers=headers)
    selector = etree.HTML(html.text)
    # Each <li> under ul.note-list is one article entry
    infos = selector.xpath('//ul[@class="note-list"]/li')
    for info in infos:
        try:
            author = info.xpath('div/div[1]/div/a/text()')[0]
            time = info.xpath('div/div[1]/div/span/@data-shared-at')[0]
            title = info.xpath('div/a/text()')[0]
            content = info.xpath('div/p/text()')[0].strip()
            view = info.xpath('div/div[2]/a[1]/text()')[1].strip()
            comment = info.xpath('div/div[2]/a[2]/text()')[1].strip()
            like = info.xpath('div/div[2]/span[1]/text()')[0].strip()
            rewards = info.xpath('div/div[2]/span[2]/text()')
            # Not every article has been rewarded; '无' ("none") marks the absence
            if len(rewards) == 0:
                reward = '无'
            else:
                reward = rewards[0].strip()
            data = {
                'author': author,
                'time': time,
                'title': title,
                'content': content,
                'comment': comment,
                'view': view,
                'like': like,
                'reward': reward
            }
            jianshu_shouye.insert_one(data)
        except IndexError:
            # Skip entries whose markup does not match the expected structure
            pass


if __name__ == '__main__':
    urls = ['http://www.jianshu.com/c/bDHhpK?order_by=commented_at&page={}'.format(str(i))
            for i in range(1, 2000)]
    # Create a pool of four worker processes
    pool = Pool(processes=4)
    # Hand the URL list to the pool; each worker scrapes its pages independently
    pool.map(get_jianshu_info, urls)
    pool.close()
    pool.join()
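
After the crawl finishes, the results can be spot-checked from a Python shell. A minimal sketch, assuming the same local MongoDB instance and the mydb / jianshu_shouye names used above:

import pymongo

client = pymongo.MongoClient('localhost', 27017)
collection = client['mydb']['jianshu_shouye']

# Total number of stored articles
print(collection.count_documents({}))

# Inspect one document to confirm the field layout
print(collection.find_one())

# Sample a few articles that actually received a reward ('无' means none)
for doc in collection.find({'reward': {'$ne': '无'}}).limit(5):
    print(doc['title'], doc['reward'])

Since the script only catches IndexError, a check like this also reveals how many entries were silently skipped because their markup did not match the expected XPath structure.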


Reposted from: https://www.cnblogs.com/silverbulletcy/p/8021925.html
