Many websites come in two versions, a mobile version and a desktop version, and the difficulty of scraping them differs; in my experience, the mobile version is usually the easier target. So decide which version to scrape before writing any code. Also keep in mind that site structures change constantly, so no scraper is written once and done!
This article walks through scraping a fixed number of news items from each section of the Tencent News mobile site.
The main idea: use Selenium to keep scrolling the page, checking after each scroll whether the number of loaded news items has reached the target; once it has, stop, parse the page HTML to collect all the article links, then request each article URL and extract four fields from it: time, title, source, and body text.
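For the last step, it helps to know that each article page embeds its data as a JSON object assigned to window.initData inside a script tag, so the four fields can be cut out with a regular expression plus json.loads instead of parsing HTML. A minimal sketch of that idea (parse_article is a hypothetical helper; the initData layout is whatever the full code below assumes and may change as the site evolves):

import re
import json
import requests

def parse_article(url, headers):
    # fetch the article page and isolate the JSON assigned to window.initData
    html = requests.get(url, headers=headers).text
    payload = re.sub(".+window.initData = ", "", html)
    payload = re.sub(";</script><script>.+", "", payload)
    content = json.loads(payload)["content"]
    # time, title, source are top-level; the body text sits one level deeper
    return content["time"], content["title"], content["source"], content["content"]["text"]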
My reference code is below. To use it you will need to adapt a few things, such as the path to the browser driver (install one if you don't have it). The example only scrapes the sports section; to add more sections, edit the target_list list.
# created time: 2023-03-10
import json
import re
import time

import pandas as pd
import requests
from lxml import etree
from selenium import webdriver
from selenium.webdriver.edge.service import Service
# Selenium crawler framework for the Tencent News site
class qqnews_spider:
    qqnews_url = "https://xw.qq.com/"
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36 Edg/96.0.1054.53'}

    def __init__(self, url, executable_path, path, n):
        self.n = n                              # number of news items to collect
        self.url = url                          # URL of the section front page
        self.executable_path = executable_path  # path to the browser driver executable
        self.text = None                        # page source captured after scrolling
        self.path = path                        # output file name (without extension)
        self.df = []                            # accumulated rows: [time, title, source, link, body]
    def main(self):
        # Selenium 4 style: pass the driver path through a Service object
        driver = webdriver.Edge(service=Service(self.executable_path))
        driver.get(self.qqnews_url)
        time.sleep(5)
        # self.add_cookie(driver)  # optionally load cookies first
        driver.get(self.url)
        print("Visiting the target page...")
        time.sleep(4)
        self.roll_to_n(driver)
        time.sleep(5)
        self.html_parser()  # results accumulate in self.df
        df = pd.DataFrame(self.df, columns=["time", "title", "source", "link", "body"])
        df.drop_duplicates(inplace=True)
        # engine="xlsxwriter" requires the xlsxwriter package to be installed
        df.to_excel("{}.xlsx".format(self.path), index=False, engine="xlsxwriter")
        driver.quit()
    def add_cookie(self, driver):
        # cookies.json must be exported/updated beforehand
        with open("cookies.json", "r", encoding="utf-8") as f:
            listCookies = json.loads(f.read())
        # add the cookies to the browser session
        for cookie in listCookies:
            cookie_dict = {
                'domain': '.qq.com',
                'name': cookie.get('name'),
                'value': cookie.get('value'),
                'path': '/',
                'httpOnly': False,
                'secure': False
            }
            driver.add_cookie(cookie_dict)
    def roll_to_n(self, driver):  # keep scrolling until the first n news items are loaded
        result = []
        while len(result) < self.n:
            print("news items loaded:", len(result))
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")  # scroll to the bottom
            time.sleep(0.2)
            html = etree.HTML(driver.page_source)
            # collect the news cards, de-duplicated by article title
            result = list(set(html.xpath('//div[@dt-eid="em_item_article"]//a//p/text()')))
            time.sleep(1)
        self.text = driver.page_source
    def html_parser(self):  # from self.text, extract each article's time, title, source, and link
        ori = self.text
        ori = ori.replace("<br>", "\n")
        ori = ori.replace("%2F", "/")
        ori = ori.replace("\\n", "")
        ori = ori.replace("\u200b", " ")  # replace zero-width spaces
        html = etree.HTML(ori)
        card_list = html.xpath('//div[@dt-eid="em_item_article"]')
        print("Parsing the HTML...")
        for i, card in enumerate(card_list, start=1):
            try:
                # re-parse the card as a standalone document so XPath is rooted at the card
                s = etree.tostring(card, encoding='utf-8').decode('utf-8')
                card = etree.HTML(s)
                link = card.xpath('//a/@href')[0]
                # fetch the article page
                response = requests.get(link, headers=self.header)
                text = response.text
                # the article data is a JSON object assigned to window.initData in a script tag
                text = re.sub(".+window.initData = ", "", text)
                text = re.sub(";</script><script>.+", "", text)
                dic = json.loads(text)
                content = dic["content"]
                time_ = content["time"]
                title = content["title"]
                source = content["source"]
                body = content["content"]["text"]
                body = re.sub(r"</P><P>", "\n", body)  # paragraph tags -> newlines
                body = re.sub(r'<.*?>', '', body)      # strip any remaining tags
                self.df.append([time_, title, source, link, body])
                print(i)
            except Exception:
                # skip cards that fail to parse (ads, non-article links, layout changes)
                continue
if __name__ == "__main__":
    target_list = ["sports"]  # add more section names here to scrape other sections
    executable_path = r'D:\xzh\Documents\msedgedriver.exe'  # change to your own driver path
    for target in target_list:
        url = "https://xw.qq.com/m/{}".format(target)
        spider = qqnews_spider(url, executable_path, target, 400)
        spider.main()
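Each section in target_list ends up in its own Excel file named after the section, e.g. sports.xlsx for the run above. A quick way to sanity-check the output (a minimal sketch, assuming the run completed and pandas, with an xlsx reader such as openpyxl installed, can read the file back):

import pandas as pd

df = pd.read_excel("sports.xlsx")
print(df.shape)  # about (400, 5), minus duplicates and articles that failed to parse
print(df.head())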