Python Scraping Assignment | Crawling Jianshu Users: Approach and Code

简书大学堂

Over the past few months, apart from the pages I open for work, the site I have clicked into most is Jianshu (简书).

Since my study of web scraping has also been ongoing, after finishing a crawl of Lagou (拉勾) job listings I set my sights on Jianshu; this is also part of a hands-on practice assignment.

Initial approach

While writing the code with Scrapy today I also did a rough analysis of the page structure. Combining that with 罗罗攀's earlier approach, my initial plan enters through the topic (专题) pages:

Hot topics (热门专题)


Topic administrators (their followers, articles, word count, and likes received are usually all impressive)


Those data items (followers, articles, word count, likes received) are exactly the fields I need to crawl; a minimal sketch of the admin-listing request follows below.

However, this approach has a problem:

Some Jianshu users are not administrators of any hot topic yet still have a large following; this approach would likely miss them.
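Before getting into the full spider, here is a minimal standalone sketch of the first step of this approach: pulling one topic's administrator list from the JSON endpoint that the spider below also uses (/collections/<id>/editors_and_subscribers). The headers are trimmed down, the topic id is a placeholder, and real requests may also need the cookies shown in the spider, so treat it only as an illustration.

# Minimal sketch: list one topic's administrators via the editors endpoint.
# The topic id '21' is a placeholder; 'editors' and 'editors_total_pages'
# are the JSON fields the full spider reads.
import requests

HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',  # ask for the JSON (XHR) response
}

def list_topic_admins(topic_id):
    url = 'http://www.jianshu.com/collections/%s/editors_and_subscribers' % topic_id
    data = requests.get(url, headers=HEADERS).json()
    return [(e['nickname'], e['slug']) for e in data.get('editors', [])]

if __name__ == '__main__':
    for nickname, slug in list_topic_admins('21'):   # placeholder topic id
        print('%s  %s' % (nickname, slug))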

Advanced approach

Hot topics

Users who follow each topic

Those users' activity (timeline) feeds

Recommended authors and their follower info


Pros:

The data set is large and comprehensive, probably covering around 99% of users (my own guess, not rigorous).

Cons:

Many users follow more than one topic, the lists contain a lot of newly registered users (whose profile data is mostly empty), and there is a large amount of duplicate data to deduplicate (see the sketch below).
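The deduplication itself is cheap: the full spider simply merges the follower lists and passes them through set(). Below is a rough standalone sketch of the follower-collection step plus that dedup, under the same assumptions the spider makes (9 followers per listing page, name links matching class="name" href="...", and headers/cookies as in the spider); the slugs and counts you would pass in are placeholders.

# Sketch: page through a user's follower listing and deduplicate merged results.
# Assumes 9 followers per page and the 'class="name" href="..."' markup,
# exactly as the spider below does; slug and total_followers are placeholders.
import re
import requests

def follower_links(slug, total_followers, headers=None, cookies=None):
    links = []
    pages = int(total_followers) // 9 + 1   # 9 followers per listing page
    for page in range(1, pages + 1):
        url = 'http://www.jianshu.com/users/%s/followers?page=%d' % (slug, page)
        html = requests.get(url, headers=headers, cookies=cookies).text
        links.extend(re.findall(r'class="name" href="(.+?)">', html))
    return links

# merge the lists gathered from topic admins and recommended authors,
# then drop duplicates before any user page is fetched:
# unique_users = list(set(admin_followers + recommended_followers))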

Code:

jianshu.py (still being debugged, updates to follow...)

# -*- coding: utf-8 -*-
import sys
import json
import re

import requests
import scrapy
from lxml import etree
from scrapy.http import Request

# Python 2 only: force UTF-8 as the default string encoding
reload(sys)
sys.path.append('..')
sys.setdefaultencoding('utf-8')


class jianshu(scrapy.Spider):
    name = 'jianshu'
    # topic_category = ['city']
    topic_category = ['recommend', 'hot', 'city']
    # paged topic listings, ordered by 'recommend' / 'hot' / 'city'
    base_url = 'http://www.jianshu.com/recommendations/collections?page=%s&order_by=%s'

    cookies = {
        'UM_distinctid': '15b89d53a930-02ab95f11ccae2-51462d15-1aeaa0-15b89d53a9489b',
        'CNZZDATA1258679142': '1544557204-1492664886-%7C1493280769',
        '_session_id': 'Q3RteU9BeTA3UVh1bHp1d24ydmZJaGdkRDZJblE3SWg3dTlNR2J1WmJ5NS9HNlpOZVg4ZUk0TnNObE5wYXc3SjhYcU5WR0NKZ3RhcE9veFVDU2RNWkpqNE44MWxuVmtoR1ZDVXBFQ29Kc1kzZmd4SVNZakJyWVN4c1RFQXZNTFhmUUtxemVDVWlVU1l3VW92NFpTeEE2Q0ppUVN0QVFEMUpLZjFHdHViR21zZko2b1lFTW9DR08yNDh5Z0pvd0VJRzc4aFBqRnZYbGt6QXlmSzMxdU1QTVFwUVcxdUViaElqZzh2Y1RwcENtSWxWbW5PMUVGZ2UrZ2xVcm1NTlpMK2x2UTdOWlZjUVNPK1dCTERpMnd6U3ZxbXlROENML2VseTRHUTBqbFE1ZUlqN1FqazJJK0tsV1htdEt1bnl5MkhCbHNJTmh1ejFLTW9pYVcrVmx0bit1blNXV1VCQ3JNbHAvK1Z5T1ZvUk5IMVMzR1dUNHBlWFZBamcwYjQxSzBjZVRvMGRZSDRmV0xtTGZHekF1M3V6dGcwMHhpQ24zdmVKelV5eDRFSWZ4QT0tLW1uSXNLakp6SW54SUo0QU16a2dFSkE9PQ%3D%3D--0849c37208f8c573960d857029c7d6a15145c419',
        'remember_user_token': 'W1szNDgxMjU3XSwiJDJhJDEwJDlSS3VLcFFWMlZzNFJuOFFNS1JQR3UiLCIxNDk0MjEzNDQ3LjYwODEwNzgiXQ%3D%3D--9241542a4e44d55acaf8736a1d57dd0e96ad4e7a',
        '_ga': 'GA1.2.2016948485.1492666105',
        '_gid': 'GA1.2.382495.1494550475',
        'Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068': '1494550475',
        'Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068': '1494213432,1494213612,1494321303,1494387194'
    }

    headers = {
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'close',
        'Cookie': 'UM_distinctid=15b89d53a930-02ab95f11ccae2-51462d15-1aeaa0-15b89d53a9489b; CNZZDATA1258679142=1544557204-1492664886-%7C1493280769; remember_user_token=W1szNDgxMjU3XSwiJDJhJDEwJDlSS3VLcFFWMlZzNFJuOFFNS1JQR3UiLCIxNDk0MjEzNDQ3LjYwODEwNzgiXQ%3D%3D--9241542a4e44d55acaf8736a1d57dd0e96ad4e7a; _ga=GA1.2.2016948485.1492666105; _gid=GA1.2.824702661.1494486429; _gat=1; Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1494213432,1494213612,1494321303,1494387194; Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1494486429; _session_id=czl6dzVOeXdYaEplRVdndGxWWHQzdVBGTll6TVg5ZXFDTTI5cmN2RUsvS2Y2d3l6YlkrazZkZWdVcmZDSjFuM2tpMHpFVHRTcnRUVnAyeXhRSnU5UEdhaGMrNGgyMTRkeEJYOE9ydmZ4N1prN1NyekFibkQ5K0VrT3paUWE1bnlOdzJrRHRrM0Z2N3d3d3hCcFRhTWdWU0lLVGpWWjNRdjArZkx1V2J0bGJHRjZ1RVBvV25TYnBQZmhiYzNzOXE3VWNBc25YSS93WUdsTEJFSHVIck4wbVI5aWJrUXFaMkJYdW41WktJUDl6OVNqZ2k0NWpGL2dhSWx0S2FpNzhHcFZvNGdQY012QlducWgxNVhoUEN0dUpCeUI4bEd3OXhiMEE2WEplRmtaYlR6VTdlZXFsaFFZMU56M2xXcWwwbmlZeWhVb0dXKzhxdEtJaFZKaUxoZVpUZEZPSnBGWmF3anFJaFZpTU9Icm4wcllqUFhWSzFpYWF4bTZmSEZ1QXdwRWs3SHNEYmNZelA4VG5zK0wvR0MwZDdodlhZakZ6OWRVbUFmaE5JMTIwOD0tLXVyVEVSeVdOLy9Cak9nVG0zV0hueVE9PQ%3D%3D--ea401e8c501e7b749d593e1627dbaa88ab4befc2',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36',
        'Host': 'www.jianshu.com',
        'X-Requested-With': 'XMLHttpRequest'
    }

    def get_total_page(self):
        # Work out how many listing pages each category has.
        # Returns a list of dicts, e.g. [{'total_page': x, 'category': 'hot'}, ...]
        total_page_list = []
        for order in self.topic_category:
            order = order.decode('utf-8')
            total_page = 100
            category_pages = {}
            for page in range(1, total_page):
                url = self.base_url % (page, order)
                html = requests.get(url, headers=self.headers).content
                selector = etree.HTML(html)
                try:
                    # if the page still lists at least one topic, keep paging
                    selector.xpath('//*[@id="list-container"]/div[1]/div/h4/a/text()')[0]
                except IndexError:
                    # an empty listing means the previous page was the last one
                    category_pages['total_page'] = page - 1
                    category_pages['category'] = order
                    break
            total_page_list.append(category_pages)
        return total_page_list

    def get_topic_info(self):
        # Collect name, URL and collection id for every topic in every category
        topic_info_list = []
        total_page_list = self.get_total_page()
        base_url = self.base_url
        for page_info in total_page_list:
            category = page_info['category']
            total_page = int(page_info['total_page'])
            for page in range(1, total_page + 1):
                url = base_url % (page, category)
                html = requests.get(url, headers=self.headers, cookies=self.cookies).content
                selector = etree.HTML(html)
                topic_href = selector.xpath('//*[@id="list-container"]')[0]
                for href in topic_href:
                    topic = {}
                    topic_name = href.xpath('./div/h4/a/text()')[0]
                    topic_url = "www.jianshu.com" + href.xpath('./div/h4/a/@href')[0]
                    topic_img_url = href.xpath('./div/a/img/@src')[0]
                    # the collection id is embedded in the cover image URL
                    img_num = topic_img_url.split("/")[5]
                    topic['topic_name'] = topic_name
                    topic['topic_url'] = topic_url
                    topic['img_num'] = img_num
                    topic_info_list.append(topic)
        return topic_info_list

    def get_topic_admin_info(self):
        # Collect nickname and slug for every topic administrator (editor)
        topic_admin_info_list = []
        topic_info_list = self.get_topic_info()
        for topic in topic_info_list:
            img_num = str(topic['img_num'])
            base_url = "http://www.jianshu.com/collections/%s/editors_and_subscribers" % img_num
            base_url_response = requests.get(base_url, headers=self.headers, cookies=self.cookies)
            json_data_base = json.loads(base_url_response.text)
            editors_total_pages = json_data_base['editors_total_pages']
            for page in range(1, int(editors_total_pages) + 1):
                if page == 1:
                    # the first page of editors is already in the initial response
                    editors = json_data_base['editors']
                    for editor in editors:
                        admin = {}
                        admin['nickname'] = editor['nickname']
                        admin['slug'] = editor['slug']
                        topic_admin_info_list.append(admin)
                else:
                    try:
                        url = "http://www.jianshu.com/collections/{}/editors?page={}".format(img_num, page)
                        response = requests.get(url, headers=self.headers, cookies=self.cookies)
                        json_data = json.loads(response.text)
                        editors = json_data['editors']
                        for editor in editors:
                            admin = {}
                            admin['nickname'] = editor['nickname']
                            admin['slug'] = editor['slug']
                            topic_admin_info_list.append(admin)
                    except Exception:
                        pass
        return topic_admin_info_list

    def get_followers_following_list(self):
        # Collect the follower list (/u/<slug> links) of every topic administrator
        followers_list = []
        topic_admin_list = self.get_topic_admin_info()
        followers_base_url = "http://www.jianshu.com/users/%s/followers"
        for admin in topic_admin_list:
            url = followers_base_url % admin['slug']
            headers = self.headers
            headers['Referer'] = url
            headers['DNT'] = '1'
            response = requests.get(url, headers=headers, cookies=self.cookies).content
            # the header counts sit inside <p> tags; the second one is the follower count
            total_followers = re.findall(r'<p>(\d+)</p>', response)[1]
            # the follower listing shows 9 users per page
            total_page = int(total_followers) / 9 + 1
            for page in range(1, int(total_page) + 1):
                followers_url = url + "?page=" + str(page)
                html = requests.get(followers_url, headers=headers, cookies=self.cookies).content
                names = re.findall(r'class="name" href="(.+?)">.*', html)
                followers_list.extend(names)
        return followers_list

    def get_recommand_editor(self):
        # Collect the slugs of Jianshu's recommended authors
        recommand_editors_list = []
        for page in range(1, 10):
            url = "http://www.jianshu.com/recommendations/users?page={}".format(str(page))
            headers = self.headers
            headers["Referer"] = "http://www.jianshu.com/recommendations/users?utm_source=desktop&utm_medium=index-users"
            headers["Accept"] = "text/html, */*; q=0.01"
            headers["Connection"] = "keep-alive"
            html = requests.get(url, headers=headers, cookies=self.cookies).content
            lists = re.findall(r'class="_blank" href="/users/(.+?)">.*', html)
            recommand_editors_list.extend(lists)
        return recommand_editors_list

    def get_recommand_editor_followers(self):
        # Collect the follower list of every recommended author
        get_recommand_editor_followers_list = []
        recommand_editor_list = self.get_recommand_editor()
        for editor in recommand_editor_list:
            url = "http://www.jianshu.com/u/" + str(editor)
            followers_base_url = "http://www.jianshu.com/users/{}/followers".format(str(editor))
            headers = self.headers
            headers['Referer'] = url
            headers['DNT'] = '1'
            response = requests.get(url, headers=headers, cookies=self.cookies).content
            # same <p>-wrapped counts as above; the second number is the follower count
            total_followers = re.findall(r'<p>(\d+)</p>', response)[1]
            total_page = int(total_followers) / 9 + 1
            for page in range(1, int(total_page) + 1):
                followers_url = followers_base_url + "?page=" + str(page)
                html = requests.get(followers_url, headers=headers, cookies=self.cookies).content
                names = re.findall(r'class="name" href="(.+?)">.*', html)
                get_recommand_editor_followers_list.extend(names)
        return get_recommand_editor_followers_list

    def is_redirect(self, url):
        # True if the URL answers with a 302 redirect instead of a normal page
        state_code = requests.get(url, allow_redirects=False).status_code
        return state_code == 302

    def start_requests(self):
        followers_lists = self.get_followers_following_list()
        editor_followers_list = self.get_recommand_editor_followers()
        # admin followers + recommended-author followers, deduplicated
        total_list = followers_lists + editor_followers_list
        total_list = list(set(total_list))
        base_url = "http://www.jianshu.com/%s"
        for d in total_list:
            if d is None:
                continue
            url = base_url % d
            # d looks like "/u/<slug>"; the timeline page carries the profile stats
            timeline_url = "http://www.jianshu.com/users/%s/timeline" % d.split("/")[2]
            headers = self.headers
            headers['Referer'] = url
            headers['Upgrade-Insecure-Requests'] = '1'
            headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
            yield Request(timeline_url, headers=headers, callback=self.parse)

    def parse(self, response):
        data = response.body
        item = {}
        # the display name sits in the profile header's "name" link
        topic_admin_name = re.findall(r'class="name" href=".*?">(.+?)</a>', data)[0]
        item['topic_admin_name'] = str(topic_admin_name)
        # the five <p>-wrapped numbers are: following, followers, articles, words, likes
        one = re.findall(r'<p>(\d+)</p>', data)
        item['topic_admin_gz'] = str(one[0])         # following
        item['topic_admin_fans'] = str(one[1])       # followers
        item['topic_admin_essay_num'] = str(one[2])  # articles
        item['topic_admin_word_num'] = str(one[3])   # total words
        item['topic_admin_like'] = str(one[4])       # likes received
        yield item
