Crawling the 🐧 (Tencent) News homepage
1. Understand Ajax asynchronous loading (a minimal demonstration follows this list).
2. Use Chrome's developer tools to monitor network requests and analyze the page structure and information flow.
3. Use Selenium to implement the crawler:
Use Selenium to scrape the homepage news from https://news.qq.com/.
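Before writing the full crawler, it helps to see point 1 in action. The sketch below is an illustration rather than part of the final script (it assumes Selenium can locate chromedriver on its own): it scrolls once and compares the page height before and after. If the height grows, new items were fetched asynchronously via Ajax, and you can watch the matching XHR requests appear in the Network tab of Chrome's developer tools while it runs.

import time
from selenium import webdriver

driver = webdriver.Chrome()  # assumes chromedriver is on PATH
driver.get("https://news.qq.com")

before = driver.execute_script("return document.body.scrollHeight")
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)  # give the Ajax request time to complete
after = driver.execute_script("return document.body.scrollHeight")
print("page height before: %d, after: %d" % (before, after))
driver.quit()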
import time

import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver

# The chromedriver path is machine-specific; use a raw string so the
# backslashes are not treated as escape sequences
driver = webdriver.Chrome(executable_path=r'E:\Anaconda3\Scripts\chromedriver.exe')
driver.get("https://news.qq.com")

# Scroll down step by step so the Ajax lazy loading keeps fetching news
for i in range(1, 100):
    time.sleep(2)
    driver.execute_script("window.scrollTo(window.scrollX, %d);" % (i * 200))

html = driver.page_source

# Parse the rendered HTML
bsObj = BeautifulSoup(html, "lxml")
jxtits = bsObj.find_all("div", {"class": "jx-tit"})[0].find_next_sibling().find_all("li")

print("index", ",", "title", ",", "url")
csvRow_index = []
csvRow_title = []
csvRow_url = []
for i, jxtit in enumerate(jxtits):
    # The title normally sits in the image's alt attribute; fall back to
    # the lazy-load placeholder text when the image was never rendered
    try:
        text = jxtit.find_all("img")[0]["alt"]
    except (IndexError, KeyError):
        text = jxtit.find_all("div", {"class": "lazyload-placeholder"})[0].text
    csvRow_title.append(text)
    try:
        url = jxtit.find_all("a")[0]["href"]
    except (IndexError, KeyError):
        print(jxtit)
        url = ""  # keep the rows aligned even when no link is found
    csvRow_url.append(url)
    csvRow_index.append(i)
    print(i + 1, ",", text, ",", url)

csv_file = pd.DataFrame()
csv_file['index'] = csvRow_index
csv_file['title'] = csvRow_title
csv_file['url'] = csvRow_url
csv_file.to_csv('penguin_news.csv', index=None)
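A note on the scroll loop: the fixed 100 iterations with 2-second sleeps is simple but slow, and it keeps scrolling even after the feed stops growing. A hedged alternative, written as a drop-in replacement for that loop (it reuses the driver object and the time import from the script above), stops as soon as the page height stabilizes; the 2-second pause is an assumption about how long the site needs to respond.

# Replacement for the fixed-count scroll loop above: scroll until the
# page height stops growing, i.e. the Ajax feed has nothing more to load
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2)  # assumed response time for the Ajax request
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break  # no new content was appended
    last_height = new_height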
The results are exported to a CSV file containing the news index, title, and URL. To verify the export, read it back with pandas:
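import pandas as pd

# Sanity check on the exported file (assumes penguin_news.csv was just
# written to the current directory by the script above)
news = pd.read_csv('penguin_news.csv')
print(news.head())              # first few rows: index, title, url
print(len(news), "rows saved")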