import re
import requests
import json
import pandas
import os
import sys
from bs4 import BeautifulSoup
# Fetch a page and return its decoded HTML text.
def getHTMLText(url, kv):
    """Fetch *url* with the given request headers.

    Parameters
    ----------
    url : str
        Target URL.
    kv : dict
        HTTP headers to send (here used for the User-Agent).

    Returns
    -------
    str or None
        Response body on success; None on any request failure.
    """
    try:
        # timeout prevents the script from hanging forever on a dead host
        r = requests.get(url, headers=kv, timeout=10)
        r.raise_for_status()
        # trust the encoding sniffed from the body, not the header default
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # best-effort scraper: report and return None so the caller can bail out
        print(e)
        return None
# Parse actor names and profile links from the page and save them to a JSON file.
def parserData(text):
    """Extract actor name/link pairs from the Baike cast-list HTML.

    Parameters
    ----------
    text : str
        Full HTML of the page.

    Side effects
    ------------
    Writes a JSON array of {"name": ..., "link": ...} objects to
    'zhifou.json' (UTF-8, non-ASCII kept readable).
    """
    soup = BeautifulSoup(text, 'lxml')
    review_list = soup.find_all('li', {'class': 'pages'})
    # Re-parse the stringified <li> fragments so we can search inside them.
    soup1 = BeautifulSoup(str(review_list), 'lxml')
    all_dts = soup1.find_all('dt')
    stars = []
    i = 0
    for dt in all_dts:
        star = {}
        try:
            print(dt.find('a').text)
            star["name"] = dt.find('a').text
            star["link"] = 'https://baike.baidu.com' + dt.find('a').get('href')
            stars.append(star)
        except AttributeError:
            # <dt> with no <a> child — skip it.
            continue
        i += 1
    print(i)
    # BUG FIX: the old  json.loads(str(stars).replace("'", '"'))  round-trip
    # broke on any name or URL containing a quote character; `stars` is
    # already JSON-serializable, so dump it directly.
    with open('zhifou.json', 'w', encoding='UTF-8') as f:
        json.dump(stars, f, ensure_ascii=False)
if __name__ == '__main__':
    # Present a regular-browser User-Agent; the site rejects the default one.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
    }
    # Baidu Baike page for the TV drama (source of the cast list).
    url = 'https://baike.baidu.com/item/%E7%9F%A5%E5%90%A6%E7%9F%A5%E5%90%A6%E5%BA%94%E6%98%AF%E7%BB%BF%E8%82%A5%E7%BA%A2%E7%98%A6/20485668?fr=aladdin'
    text = getHTMLText(url, headers)
    # getHTMLText returns None on failure (it already printed the error);
    # feeding None into BeautifulSoup would raise a confusing TypeError.
    if text is None:
        sys.exit(1)
    parserData(text)
    print("所有信息爬取完成!")