Scraping Weibo information with a Python crawler
Without further ado, here's the code!
import requests
from bs4 import BeautifulSoup
from urllib import parse
import time

# Pretend to be a normal browser so Weibo does not reject the request outright
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.52"}

def get_html(url):
    # Fetch the page and hand the HTML to the parser if the request succeeded
    html = requests.get(url, headers=headers)
    if html.status_code == 200:
        print("Page fetched successfully")
        parse_html(html.text)
    else:
        print("ERROR", html.text)

def parse_html(content):
    # Each hot-search entry is one row of the results table
    soup = BeautifulSoup(content, 'lxml')
    trs = soup.select('table tbody tr')
    for tr in trs:
        link = tr.select_one('td a')
        title = link.text
        # The href is relative, so join it with the site root to get a full URL
        url = parse.urljoin('https://s.weibo.com', link['href'])
        message = title + url + '\n'
        # Append each entry to the output file; 'with' closes the file automatically
        with open("C:/Users/86135/Desktop/微博信息.txt", 'at', encoding='utf-8') as f:
            f.write(message)

if __name__ == '__main__':
    start = time.time()
    # Hot-search list
    url = "https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6"
    get_html(url)
    # Social-events list
    url2 = "https://s.weibo.com/top/summary?cate=socialevent"
    get_html(url2)
    print(time.time() - start)
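One caveat: Weibo sometimes serves the hot-search pages only to logged-in sessions, in which case the request above may get a login or redirect page back and the table selector will find nothing. A minimal sketch of sending a browser cookie along with the request, assuming you have copied the cookie string from your own logged-in browser's developer tools (the cookie value below is a placeholder):

import requests

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.52",
    # Placeholder: paste the Cookie header from your own logged-in session here
    "Cookie": "SUB=xxx; SUBP=xxx",
}

resp = requests.get("https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6",
                    headers=headers, timeout=10)
print(resp.status_code, len(resp.text))

If the status code is 200 but the output file stays empty, printing a slice of resp.text usually makes it obvious whether a login page came back instead of the hot-search table.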
The output looks like this: