Question: scrape a Baidu Baijiahao account's articles, dynamics (feed posts) and videos — titles, publish times, play counts, video files, read counts, etc.
Code:
# -*- coding: utf-8 -*-
import os
import re
import time
import json
from datetime import datetime, timedelta
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
from PIL import Image
from PIL import ImageChops
from pathlib import Path
from fake_useragent import UserAgent
# from config import no_access_screenshot_time
from multiprocessing.dummy import Pool  # thread pool
from lxml import etree
# ua = UserAgent()
headers = {
# "Accept": '*/*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,ar;q=0.7',
# 'Connection': 'keep-alive',
# 'Content-Length': '62',
# 'Content-Type': 'application/json;charset=UTF-8',
# 'Host': 'cq.gov.cn',
# 'Origin': 'http://www.cq.gov.cn',
# 'Referer': 'http://www.cq.gov.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
}
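# Note: the headers dict above is defined but never passed to any request below, so the custom
# User-Agent never actually takes effect. If it should, it would have to be registered on the
# session, e.g. (a minimal sketch):
# session = requests.Session()
# session.headers.update(headers)  # every session.get() would then send this User-Agent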
ctime = ''
import importlib, sys
importlib.reload(sys)  # Python 2 leftover (sys.setdefaultencoding); has no real effect on Python 3
api_url = 'https://mbd.baidu.com/webpage?'
f = open('dynamic.txt', 'w', encoding='utf-8')
f.close()
f = open('article.txt', 'w', encoding='utf-8')
f.close()
f = open('video.txt', 'w', encoding='utf-8')
f.close()
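# The three open/close pairs above only truncate the output files. An equivalent, slightly tidier
# sketch using pathlib (already imported) would be, for example:
# for name in ('dynamic.txt', 'article.txt', 'video.txt'):
#     Path(name).write_text('', encoding='utf-8')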
def write_txt_d(content):
    with open('dynamic.txt', 'a+', encoding='utf-8') as f1:
        f1.write(content)
def write_txt_a(content):
    with open('article.txt', 'a+', encoding='utf-8') as f1:
        f1.write(content)
def write_txt_v(content):
    with open('video.txt', 'a+', encoding='utf-8') as f1:
        f1.write(content)
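# The three writers above differ only in the file name; one helper would cover all three cases.
# A minimal sketch (write_txt is a new name, not used elsewhere in this script):
def write_txt(path, content):
    # append one record to the given output file
    with open(path, 'a+', encoding='utf-8') as f1:
        f1.write(content)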
def dynamic():
    """
    Fetch the dynamic (feed post) resources.
    :return:
    """
    ctime = ''
    while True:
        params = {
            'tab': 'dynamic',
            'num': '10',
            'uk': 'FqzL6WxL7taxiI2tDzr_9A',
            'source': 'pc',
            'ctime': ctime,
            # 'ctime': '16215848239999',
            'type': 'newhome',
            'action': 'dynamic',
            'format': 'jsonp',
            'otherext': 'h5_20210521143926',
            'Tenger-Mhor': '3133358778',
            # 'callback': '__jsonp11622020821123',
        }
        # warm up the session first so cookies are picked up automatically
        session = requests.Session()
        session.get(url=api_url)
        response = session.get(url=api_url, params=params)
        page = response.text
        # print(page)
        a = re.findall(r'"https:\\/\\/mbd.baidu.com\\/newspage\\/data\\/.*?"', page)  # URLs of the full dynamic pages
        a = [i.replace('\\', '').replace('"', '') for i in a]  # clean them into directly accessible URLs
        ctime = re.findall(r'"query":{"ctime":"(.*?)"', page)  # ctime cursor for the next page
        if not ctime:
            return
        ctime = ctime[0]
        # print(a, ctime)
        for url_i in a:
            page_z = session.get(url=url_i)
            txt_context = page_z.text
            try:
                txt_context = re.findall(r'"date":.*?"follow', txt_context, re.S)[0]  # keep only the useful slice of the page
            except IndexError:
                continue
            print('************************************************************************************')
            # print(txt_context)
            read_num = re.findall(r'readNum":{"count":(.*?)},', txt_context, re.S)[0]  # read count
            print("网页地址:", url_i)
            print("阅读量:", read_num)
            time_str = re.findall('"date":"(.*?)",', txt_context)[0] + " " + re.findall('"time":"(.*?)",', txt_context)[0]
            print("动态发布时间:", time_str)
            content_txt = re.findall('"title":"(.*?)"', txt_context)[0]
            content_txt = re.sub(r'\[.*?]', '', content_txt)  # strip bracketed emoji placeholders
            print("文章内容:", content_txt.encode('utf-8').decode('unicode-escape'))
            image_url = re.findall('"image_url":"(.*?)"', txt_context)
            image_url = [i.replace('\\', '').replace('"', '') for i in image_url]  # clean into directly accessible image URLs
            print("文章图片地址:", image_url)
            # img = 'https://pics0.baidu.com/feed/060828381f30e9241b7c2e5e2fbae00e1f95f7c4.jpeg?token=5ce97d75300ceb50f1856941a7c1c10d'
            praise_num = re.findall(r'"praise_num":(\d+),', txt_context)  # like count
            commentNum = re.findall(r'"commentNum":(\d+),', txt_context)  # comment count
            print("点赞数:{},评论数:{}.".format(praise_num[0], commentNum[0]))
            str_content = "网页地址:" + url_i + '\n' + "阅读量:" + read_num + '\n' + "动态发布时间:" + time_str + '\n' + \
                          "文章内容:" + content_txt.encode('utf-8').decode('unicode-escape') + "\n" + "文章图片地址:" + \
                          str(image_url) + '\n' + "点赞数:{},评论数:{}.".format(praise_num[0], commentNum[0]) + '\n' + \
                          "************************************************************************************" + "\n"
            write_txt_d(str_content)
            # time.sleep(1)
dynamic()
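# dynamic() above pulls fields out of the raw JSONP text with regular expressions. If the payload
# is valid JSON once the callback wrapper is stripped (an assumption, not verified here), a sketch
# of a more robust parser could look like this (parse_jsonp is a new helper, not part of the
# original script):
def parse_jsonp(text):
    # strip an optional "callback( ... )" wrapper, then parse the JSON body
    m = re.search(r'^\s*[\w$.]+\((.*)\)\s*;?\s*$', text, re.S)
    return json.loads(m.group(1) if m else text)
# parse_jsonp(response.text) would then return regular Python dicts/lists instead of raw text.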
def article():
    """
    Fetch the article resources.
    :return:
    """
    ctime = ''
    time_start = datetime.now()
    while True:
        params = {
            'tab': 'article',
            'num': '10',
            'uk': 'FqzL6WxL7taxiI2tDzr_9A',
            'source': 'pc',
            'ctime': ctime,
            'type': 'newhome',
            'action': 'dynamic',
            'format': 'jsonp',
            'otherext': 'h5_20210521143926',
            'Tenger-Mhor': '3133358778',
            # callback: __jsonp41622099188559
        }
        session = requests.Session()
        session.get(url=api_url)
        response = session.get(url=api_url, params=params)
        page = response.text
        # print(page)
        a = re.findall(r'"url":"(.*?)",', page)  # URLs of the full article pages
        a = [i.replace('\\', '').replace('"', '') for i in a]  # clean them into directly accessible URLs
        # print(a)
        ctime = re.findall(r'"ctime":(\d+)}', page)  # ctime cursor for the next page
        # print(ctime)
        if not ctime:
            stop_time = datetime.now()
            print("运行所用时间:{}".format(stop_time - time_start))
            return
        # parameters needed for the interact (read count) request
        asyncParams = re.findall(r'"asyncParams":{(.*?)},', page)
        # list of read counts
        read_num_list = []
        # fetch the read count for each article
        for i in asyncParams:
            asyncParams_url = r'https://mbd.baidu.com/webpage?type=homepage&action=interact&format=jsonp&Tenger-Mhor=3133358778&params=[{%s}]&uk=FqzL6WxL7taxiI2tDzr_9A&' % i
            qwq = session.get(url=asyncParams_url).text
            try:
                re_url = re.findall(r'"read_num":(.*?),', qwq)[0]
                read_num_list.append(re_url)
            except IndexError:
                print('qwqw')
                asyncParams_url = r'https://mbd.baidu.com/webpage?type=homepage&action=interact&format=jsonp&Tenger-Mhor=3133358778&params=[{%s}]&uk=FqzL6WxL7taxiI2tDzr_9A&' % i
                qwq = session.get(url=asyncParams_url).text
                re_url = re.findall(r'"read_num":(.*?),', qwq)[0]
                read_num_list.append(re_url)
                print("********未获取到重新获取一遍!*********")
                print(asyncParams_url)
                print(qwq)
                print(re_url)
                print(read_num_list)
                print('qwqw')
                # return
        for url_i in a:
            print("阅读量:", read_num_list[a.index(url_i)])
            print("网页地址:", url_i)
            page_z = session.get(url=url_i)
            page_text = page_z.text
            # article publish time
            try:
                time_str = re.findall('"date":"(.*?)",', page_text)[0] + " " + re.findall('"time":"(.*?)",', page_text)[0]
            except IndexError:
                continue
            print("文章发布时间:", time_str)
            text_title = re.findall(r'leTitle_28fPT">(.*?)<', page_text)[0]
            print("文章标题:", text_title)
            # the article body sits between the author byline and the "相关文章" (related articles) block
            txt_context = re.findall(r'诸暨市禾福健康管理有限公司官方帐号,优质健康领域创作者(.*?)相关文章', page_text)[0]
            text = re.sub(r'<.*?>', '', txt_context)
            print("文章内容:", text)
            # article images
            image_url = re.findall('img src="(.*?)"', txt_context)
            print("文章图片地址:", image_url, len(image_url))
            str_content = "网页地址:" + url_i + '\n' + "阅读量:" + read_num_list[a.index(url_i)] + '\n' + "文章发布时间:" + time_str + '\n' + \
                          "文章标题:" + text_title + '\n' + "文章内容:" + text + "\n" + "文章图片地址:" + \
                          str(image_url) + '\n' + "************************************************************************************" + '\n'
            print('************************************************************************************')
            write_txt_a(str_content)
article()
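# article() builds the interact (read count) URL by string formatting; letting requests assemble
# the query string avoids manual URL-encoding mistakes. A sketch reusing the same parameter names
# as above (get_read_num itself is a new helper, not part of the original script):
def get_read_num(session, async_params, uk='FqzL6WxL7taxiI2tDzr_9A'):
    interact_params = {
        'type': 'homepage',
        'action': 'interact',
        'format': 'jsonp',
        'Tenger-Mhor': '3133358778',
        'params': '[{%s}]' % async_params,
        'uk': uk,
    }
    resp = session.get('https://mbd.baidu.com/webpage', params=interact_params)
    m = re.search(r'"read_num":(.*?),', resp.text)
    return m.group(1) if m else None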
video_api = 'https://mbd.baidu.com/webpage?'
def video():
    ctime = ''
    video_n = 0
    ctime_list = []
    os.makedirs('./video', exist_ok=True)  # make sure the download directory exists
    while True:
        params = {
            'tab': 'video',
            'num': '10',
            'uk': 'FqzL6WxL7taxiI2tDzr_9A',
            'source': 'pc',
            'ctime': ctime,
            'type': 'newhome',
            'action': 'dynamic',
            'format': 'jsonp',
            'otherext': 'h5_20210521143926',
            'Tenger-Mhor': '2959007734',
            # callback: __jsonp41622099188559
        }
        session = requests.Session()
        session.get(url=video_api)
        response = session.get(url=video_api, params=params)
        page = response.text
        # print(page)
        a = re.findall(r'"share_url":"(.*?)"', page)  # URLs of the video pages
        a = [i.replace('\\', '').replace('"', '') for i in a]  # clean them into directly accessible URLs
        ctime = re.findall(r'"query":{"ctime":(.*?)}', page)  # ctime cursor for the next page
        # print(a, ctime)
        if not ctime:
            return
        ctime = ctime[0]
        if ctime in ctime_list:  # the same page came back again, so stop
            return
        ctime_list.append(ctime)
        for url_v in a:
            page_z = session.get(url=url_v)
            txt_context = page_z.text
            # print(txt_context)
            print('网页地址:' + url_v)
            video_title = re.findall(r'title">(.*?)<', txt_context)
            print('视频标题:' + video_title[0])
            time_num = re.findall(r'laynums">(.*?)<', txt_context)
            print('播放次数和时间:' + time_num[0])
            video_download = re.findall(r'"url":"(.*?)"', txt_context)
            video_download = [i.replace('\\', '').replace('"', '') for i in video_download]
            print(video_download[-1])
            video_n += 1
            print(video_n)
            bytes_v = session.get(url=video_download[-1]).content
            with open('./video/{}.mp4'.format(video_n), 'wb') as fv:
                fv.write(bytes_v)
            str_content = "网页地址:" + url_v + '\n' + '播放次数和时间:' + time_num[0] + '\n' + \
                          "视频标题:" + video_title[0] + "\n" + "本地视频名称:" + '{}.mp4'.format(video_n) + "\n" + "视频下载地址:" + \
                          video_download[-1] + '\n' + "************************************************************************************" + '\n'
            write_txt_v(str_content)
            print('************************************************************************************')
video()
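# video() reads each video fully into memory before writing it to disk; for large files a streamed
# download is gentler. A sketch (download_video is a new helper, not part of the original script):
def download_video(session, url, path):
    # stream the response and write it to disk in 256 KB chunks
    with session.get(url, stream=True) as resp:
        resp.raise_for_status()
        with open(path, 'wb') as fv:
            for chunk in resp.iter_content(chunk_size=256 * 1024):
                fv.write(chunk)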
qtgw_url = 'https://author.baidu.com/home?from=bjh_article&app_id=1536910259501019'
The collected txt files: