# Scrape the dynamic Weibo posts that are returned as JSON when scrolling down.
# The scroll-down request needs different parameters to return each page; the
# URL parameters are determined by the last post of the first (main) page.
# The two steps are not integrated yet — this is only a demo that assumes the
# parameters are already given; to be merged into one pipeline later.
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Fetch the Sina Weibo posts loaded after scrolling down (returned as JSON).
from bs4 import BeautifulSoup
import Auth as head
import requests
import time
def getnextinfo():
    """Fetch one "scroll-down" (AJAX-loaded) page of Weibo posts and print
    the text of every post body found in it.

    The endpoint returns JSON whose ``data`` field is an HTML fragment; the
    fragment is parsed and each post body (``div.WB_text.W_f14``) is printed,
    followed by a separator line.

    NOTE(review): the URL is hard-coded to one user / one page; the pagination
    parameters should eventually be derived from the previous page's last post
    (see the note at the top of the file).

    Raises:
        requests.HTTPError: if the endpoint answers with an error status.
    """
    # Millisecond timestamp, printed for debugging/tracing the request time.
    # (Replaces the original one-shot lambda, which froze time.time() anyway.)
    now_ms = int(round(time.time() * 1000))
    print(now_ms)

    url = ('https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=100505'
           '&refer_flag=0000015010_&from=feed&loc=avatar&is_all=1&pagebar=0'
           '&pl_name=Pl_Official_MyProfileFeed__21&id=1005051892059383'
           '&script_uri=/yishuwuyu&feed_type=0&page=1&pre_page=1'
           '&domain_op=100505&__rnd=1564487703911')
    header = head.getheader()
    # timeout so a hung connection cannot block forever; raise_for_status so
    # an HTTP error page is not silently parsed as a post list.
    response = requests.get(url, headers=header, timeout=10)
    response.raise_for_status()
    fragment = response.json()['data']  # 'data' is an HTML fragment, not JSON

    print('1111111111111111111111111111111111111111111111111111111')
    # 'fragment' is already unicode, so no from_encoding argument is needed.
    soup = BeautifulSoup(fragment, 'html.parser')
    root = soup.div  # top-level container div of the fragment
    if root is None:
        return
    # One find_all over the container replaces the original per-child
    # str()/re-parse round trips — same matches, one parse.
    for post in root.find_all('div', class_='WB_text W_f14'):
        print(post.text)
        print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
# Script entry point: print the account's main-page posts first, then the
# first scroll-down (AJAX) page. Guarded so importing this module does not
# trigger network requests as a side effect.
if __name__ == '__main__':
    head.getmianinfo()
    getnextinfo()