Python Crawler Series (4): Scraping Tencent News & Zhihu
I. Scraping Tencent News
- Understand ajax loading
- Use Chrome's developer tools to monitor and analyze the network requests
- Complete the crawler with selenium
- The concrete task is as follows:
Use selenium to scrape the 热点精选 (hot picks) feed at https://news.qq.com/. Collect at least 50 hot-picks items and store them as a csv, one row per item: index (starting from 1), title, link, … (a minimal sketch of this workflow is given below).
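The sketch below follows the steps just listed: open the page in Chrome via selenium, scroll to trigger the ajax requests that load more items, then write the collected items to a csv. The CSS selector ".detail > a" is an assumption about the hot-picks markup, not taken from the original post, and should be verified against the live DOM in the developer tools.

import csv
import time

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://news.qq.com/")

# The feed is loaded via ajax as the page scrolls, so keep scrolling
# until at least 50 items have been rendered (capped to avoid looping
# forever if the assumed selector matches nothing).
items = []
for _ in range(20):
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2)  # give the ajax request time to finish rendering
    items = driver.find_elements(By.CSS_SELECTOR, ".detail > a")  # assumed selector
    if len(items) >= 50:
        break

with open("news.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    for i, item in enumerate(items[:50], start=1):
        writer.writerow([i, item.text, item.get_attribute("href")])

driver.quit()

Each scroll fires the same ajax request that the developer tools reveal, so an alternative design is to skip the browser entirely and call that JSON endpoint directly with requests.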
1) Scraping a given Tencent news page
# Scrape the title, time, body text, and author from a given Tencent news page
import requests
from bs4 import BeautifulSoup

def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # r.encoding = 'utf-8'
        return r.text
    except:
        return ""

def getContent(url):
    html = getHTMLText(url)
    # print(html)
    soup = BeautifulSoup(html, "html.parser")
    title = soup.select("div.hd > h1")
    print(title[0].get_text())
    time = soup.select("div.a_Info > span.a_time")
    print(time[0].string)
    author = soup.select("div.qq_articleFt > div.qq_toolWrap > div.qq_editor")
    print(author[0].get_text())
    paras = soup.select("div.Cnt-Main-Article-QQ > p.text")
    for para in paras:
        if len(para) > 0:
            print(para.get_text())
            print()
    # Write the results to a file
    fo = open("text.txt", "w+", encoding="utf-8")
    fo.writelines(title[0].get_text() + "\n")
    fo.writelines(time[0].get_text() + "\n")
    for para in paras:
        if len(para) > 0:
            fo.writelines(para.get_text() + "\n\n")
    fo.writelines(author[0].get_text() + "\n")
    fo.close()
    # Store the scraped article in a dictionary
    article = {
        'Title': title[0].get_text(),
        'Time': time[0].get_text(),
        'Paragraph': paras,
        'Author': author[0].get_text()
    }
    print(article)

def main():
    url = "http://news.qq.com/a/20170504/012032.htm"
    getContent(url)

main()
2) Scraping Tencent Video comments
# Scrape Tencent Video comments
import re
import random
import urllib.request

# Pool of user agents
uapools = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0",
]

# Pick a random user agent from the pool
def ua(uapools):
    thisua = random.choice(uapools)
    # print(thisua)
    headers = ("User-Agent", thisua)
    opener = urllib.request.build_opener()
    opener.addheaders = [headers]
    # Install the opener globally
    urllib.request.install_opener(opener)

# Fetch the page source
def get_content(page, lastId):
    url = ("https://video.coral.qq.com/varticle/3242201702/comment/v2"
           "?callback=_varticle3242201702commentv2&orinum=10&oriorder=o"
           "&pageflag=1&cursor=" + lastId + "&scorecursor=0&orirepnum=2"
           "&reporder=o&reppageflag=1&source=132&_=" + str(page))
    # assumed completion: fetch and decode the JSONP response
    html = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
    return html
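The post breaks off inside get_content, so the fetch line above and the sketch below are assumed completions rather than the original code. The endpoint returns a JSONP payload; a plausible continuation extracts comment text and ids with re and pages through the feed by feeding the last id of each response back in as the next cursor (the cursor and page parameters are already visible in the URL). The "content" and "id" regex patterns are assumptions about the payload format.

# Extract comment bodies and ids from the JSONP payload (assumed format)
def get_comments(html):
    contents = re.findall(r'"content":"(.*?)"', html)
    ids = re.findall(r'"id":"(\d+)"', html)
    return contents, ids

lastId = "0"  # a cursor of 0 fetches the first page
for page in range(1, 6):
    ua(uapools)  # rotate the user agent before each request
    html = get_content(page, lastId)
    contents, ids = get_comments(html)
    for c in contents:
        print(c)  # comment text may still contain \uXXXX escapes
    if ids:
        lastId = ids[-1]  # the last comment id becomes the next cursor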