#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Required
- requests (必须)
- pillow (可选)
Info
- author : "xchaoinfo"
- email : "xchaoinfo@qq.com"
- date : "2016.2.4"
Update
- name : "wangmengcn"
- email : "eclipse_sv@163.com"
- date : "2016.4.21"
'''
import requests
try:
import cookielib
except:
import http.cookiejar as cookielib
import re
import time
import os.path
import json
from bs4 import BeautifulSoup
try:
from PIL import Image
except:
pass
# Request headers used for every Zhihu request; mobile Chrome user agent.
agent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36'
headers = {
    "Host": "www.zhihu.com",
    "Referer": "https://www.zhihu.com/",
    'User-Agent': agent
}
# Shared session with cookies persisted to the local file 'cookies',
# so a successful login can be reused on later runs.
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies')
try:
    print('加载文件中的cookies')
    session.cookies.load(ignore_discard=True)
except:
    # Best-effort: a missing/corrupt cookie file just means we are not logged in yet.
    print("Cookie 未能加载")
def get_xsrf():
    """Fetch the Zhihu home page and extract the dynamic _xsrf token."""
    home = session.get('https://www.zhihu.com', headers=headers)
    # The token is embedded in a hidden form field on the page.
    tokens = re.findall(r'name="_xsrf" value="(.*?)"', home.text)
    return tokens[0]
# 获取验证码
def get_captcha():
    """Download the login captcha image, display it (when pillow is
    available), and return the code the user types in.
    """
    # Millisecond timestamp busts any caching of the captcha endpoint.
    t = str(int(time.time() * 1000))
    captcha_url = 'https://www.zhihu.com/captcha.gif?r=' + t + "&type=login"
    r = session.get(captcha_url, headers=headers)
    # Fix: `with` already closes the file — the original also called
    # f.close() redundantly inside the with-block.
    with open('captcha.jpg', 'wb') as f:
        f.write(r.content)
    # Show the captcha with pillow; if pillow is missing (Image unbound)
    # fall back to telling the user where the file was saved.
    try:
        im = Image.open('captcha.jpg')
        im.show()
        im.close()
    except Exception:
        print(u'请到 %s 目录找到captcha.jpg 手动输入' % os.path.abspath('captcha.jpg'))
    captcha = input("please input the captcha\n>")
    return captcha
def isLogin():
    """Return True when the saved session cookies still authenticate us."""
    # The settings page answers 200 for a logged-in session; an anonymous
    # one gets a redirect, which allow_redirects=False leaves visible.
    url = "https://www.zhihu.com/settings/profile"
    response = session.get(url, headers=headers, allow_redirects=False)
    return response.status_code == 200
#获取用户的信息
def info():
url = "https://www.zhihu.com/settings/profile"
html = session.get(url, headers=headers, allow_redirects=False).text
pattern = r'class="name">(.*?)</span>'
# 这里的_xsrf 返回的是一个list
name = re.findall(pattern, html)
print('用户名:', name[0])
pattern=r'<div id="js-url-preview" class="url-preview">zhihu.com/people/<span class="token">(.*?)</span></div>'
infoUrl = re.findall(pattern, html)
print(infoUrl[0])
#得到个人信息的页面的URL
infoUrl="https://www.zhihu.com/people/"+infoUrl[0]
html2=session.get(infoUrl,headers=headers).text
pattern2=r'src="(.*?)" srcset'
figUrl=re.findall(pattern2,html2)[0]
print('头像网址',figUrl)
filename = 'pics/' + name[0] + '.jpg'
f = open(filename, 'wb')
f.write(requests.get(figUrl).content)
f.close()
# Save one follower's avatar; `url` is that user's profile page.
def get_info(tag, url):
    """Save the avatar of the user whose profile page is `url` into
    <tag>/<user-id>.jpg (`tag` doubles as the output directory).
    """
    html2 = session.get(url, headers=headers).text
    # First src="..." srcset occurrence on a profile page is the avatar.
    figUrl = re.findall(r'src="(.*?)" srcset', html2)[0]
    # Profile URLs look like https://www.zhihu.com/people/<id>; the fixed
    # 29-char prefix is sliced off to recover <id> for the filename.
    n = str(url)[29:]
    filename = tag + '/' + n + '.jpg'
    # Fix: `with` guarantees the handle is closed even if the download raises
    # (the original used a bare open()/close() pair).
    with open(filename, 'wb') as f:
        f.write(requests.get(figUrl).content)
# Download the avatar of every follower of one user.
def get_followers(tag="dong-wu-61-97"):  # `tag` is the user's unique URL slug
    """Walk the paginated followers list of the user identified by `tag`
    and save each follower's avatar into a directory named `tag`.
    """
    if not os.path.exists(tag):
        os.mkdir(tag)
    # Fix: build the base URL once instead of duplicating the string
    # concatenation, and compile the href regex outside the loop.
    base_url = 'https://www.zhihu.com/people/' + tag + '/followers'
    href_pattern = re.compile(r'href="(.*?)"')
    html = session.get(base_url, headers=headers).text
    soup = BeautifulSoup(html, 'html.parser')
    links = soup.find_all('a', class_="zg-link author-link")
    page = 1
    # An empty result set marks the end of the paginated follower list.
    while links:
        for anchor in links:
            profile_url = href_pattern.findall(anchor.decode())[0]
            get_info(tag, profile_url)  # save this follower's avatar
        page += 1
        time.sleep(1)  # throttle: Zhihu blocks clients that page too fast
        url = base_url + '?page=' + str(page)
        print(url)
        html = session.get(url, headers=headers).text
        soup = BeautifulSoup(html, 'html.parser')
        links = soup.find_all('a', class_="zg-link author-link")
# Save one Zhihu user's avatar.
def get_avatar():
    """Save the avatar of one hard-coded user into pics/<userId>.jpg."""
    userId = 'dong-wu-61-97'
    url = 'https://www.zhihu.com/people/' + userId
    response = session.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'lxml')
    # Second <span class="name"> on the page holds the display name.
    name = soup.find_all('span', {'class': 'name'})[1].string
    print(name)
    temp = soup.find('img', {'alt': name})
    # NOTE(review): `temp` is parsed but never used — the avatar URL below
    # is hard-coded, presumably a leftover debug shortcut. Kept as-is to
    # preserve behavior; the intended derivation was:
    # avatar_url = temp['src'][0:-6] + temp['src'][-4:]
    # Fix: create pics/ when missing and close the file via `with`
    # (the original leaked the handle and crashed without the directory).
    if not os.path.exists('pics'):
        os.mkdir('pics')
    filename = 'pics/' + userId + '.jpg'
    avatar_url = 'https://pic4.zhimg.com/839d556b20f227b93f841de54717904b_l.jpg'
    with open(filename, 'wb') as f:
        f.write(requests.get(avatar_url).content)
# Collect the profile of every upvoter of one answer.
def get_voters(ans_id):
    """Write one line per upvoter of answer `ans_id` (name, profile URL,
    votes/thanks/questions/answers counts) into <ans_id>.txt.
    """
    zhihu = 'http://www.zhihu.com'
    filename = str(ans_id) + '.txt'
    url = zhihu + '/answer/' + str(ans_id) + '/voters_profile'
    # Fix: `with` replaces the original open()/close() pair so the file is
    # closed even when an exception escapes the loop.
    with open(filename, 'w') as f:
        source = session.get(url, headers=headers)
        content = source.content
        print(content)  # raw JSON payload
        # Payload carries the vote total, a batch of voters ('payload'),
        # and a pointer to the next batch ('paging.next').
        data = json.loads(content.decode())
        txt1 = '总赞数'
        print(txt1)
        total = data['paging']['total']  # total number of upvotes
        print(total)
        # Each batch holds up to 10 voters; follow paging.next until it
        # points back at the bare host.
        next_url = url
        num = 0
        while next_url != 'http://www.zhihu.com':
            try:
                nextsource = session.get(next_url, headers=headers)
            except requests.RequestException:
                time.sleep(2)  # transient network error: wait, then retry once
                nextsource = session.get(next_url, headers=headers)
            # Parse this batch of voters.
            nextdata = json.loads(nextsource.content.decode())
            for each in nextdata['payload']:
                num += 1
                print(num)
                try:
                    soup = BeautifulSoup(each, 'lxml')
                    tag = soup.a
                    title = tag['title']  # voter's user name
                    href = 'http://www.zhihu.com' + str(tag['href'])  # voter's page
                    # Renamed from `list`, which shadowed the builtin.
                    items = soup.find_all('li')
                    votes = items[0].string  # upvotes the voter has received
                    tks = items[1].string    # thanks received
                    ques = items[2].string   # questions asked
                    ans = items[3].string    # answers written
                    string = title + ' ' + href + ' ' + votes + tks + ques + ans
                    # Bug fix: the original wrote string.decode(), which raises
                    # AttributeError on a Python 3 str, silently sending every
                    # voter into the except branch — no voter was ever saved.
                    f.write(string + '\n')
                    print(string)
                except Exception:
                    txt3 = '有点赞者的信息缺失'
                    f.write(txt3 + '\n')
                    print(txt3)
                    continue
            # Resolve the link to the next batch of voters.
            next_url = zhihu + nextdata['paging']['next']
def login(secret, account):
    """Log in to Zhihu with `account` (phone number or email) and password
    `secret`; persists the session cookies to disk afterwards.
    """
    _xsrf = get_xsrf()
    headers["X-Xsrftoken"] = _xsrf
    headers["X-Requested-With"] = "XMLHttpRequest"
    # Choose the endpoint from the account format: an 11-digit string
    # starting with 1 is treated as a Chinese mobile number.
    if re.match(r"^1\d{10}$", account):
        print("手机号登录 \n")
        post_url = 'https://www.zhihu.com/login/phone_num'
        postdata = {'_xsrf': _xsrf, 'password': secret, 'phone_num': account}
    else:
        if "@" not in account:
            # Neither a phone number nor an email address: bail out.
            print("你的账号输入有问题,请重新登录")
            return 0
        print("邮箱登录 \n")
        post_url = 'https://www.zhihu.com/login/email'
        postdata = {'_xsrf': _xsrf, 'password': secret, 'email': account}
    # First attempt: no captcha.
    login_page = session.post(post_url, data=postdata, headers=headers)
    login_code = login_page.json()
    if login_code['r'] == 1:
        # r == 1 means the captcha-less attempt failed; retry with a
        # user-supplied captcha.
        postdata["captcha"] = get_captcha()
        login_page = session.post(post_url, data=postdata, headers=headers)
        login_code = login_page.json()
        print(login_code['msg'])
    # Save cookies so later runs can log in without credentials.
    session.cookies.save()
# Leftover Python 2 compatibility shim; the rebinding is a no-op on
# Python 3 and any failure falls back to the builtin `input` as-is.
try:
    input = input
except:
    pass
if __name__ == '__main__':
    # Reuse saved cookies when they still authenticate; otherwise prompt
    # for credentials and perform a fresh login.
    if isLogin():
        print('您已经登录')
        #info()
        get_followers()
    else:
        account = input('请输入你的用户名\n> ')
        secret = input("请输入你的密码\n> ")
        login(secret, account)
# 爬取知乎用户信息、头像、问题关注者、用户的所有关注者的头像
# (NOTE: trailing article caption pasted into the file by mistake —
# "最新推荐文章于 2023-08-14 17:26:41 发布" — commented out so the module parses.)