import time
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
import json
def get_driver():
    """Create and return an Edge webdriver configured to hide automation hints."""
    options = webdriver.EdgeOptions()
    # Drop the "being controlled by automated software" switch.
    options.add_experimental_option("excludeSwitches", ['enable-automation'])
    # Suppress the navigator.webdriver flag so simple bot checks pass.
    options.add_argument("--disable-blink-features=AutomationControlled")
    return webdriver.Edge(
        service=Service(executable_path="./msedgedriver.exe"),
        options=options,
    )
# Module-level webdriver session shared by all scraping functions below.
driver = get_driver()
def get_url(page_count=1):
    """Collect the detail-page links listed on the exhibition index pages.

    :param page_count: number of index pages to walk (pages 1..page_count);
        defaults to 1, preserving the original single-page behavior.
    :return: list of href strings, one per exhibition entry.
    """
    # Build the paginated index URLs (site is filtered to the
    # 2017-01-01 .. 2017-12-31 window by the path segments).
    page_urls = [
        f"http://www.onezh.com/zhanhui/{page}_0_0_11_20170101/20171231/"
        for page in range(1, page_count + 1)
    ]
    res = []
    for page_url in page_urls:
        driver.get(page_url)
        # The first <a> of each listing row carries the detail-page link.
        a_tags = driver.find_elements(by=By.XPATH, value="//div[@class='jxs_list']/div[@class='row']/a[1]")
        res.extend(a.get_attribute("href") for a in a_tags)
    return res
def parse_right_detail(tuan_dcon, content):
    """Parse the right-hand detail panel of an exhibition page into *content*.

    :param tuan_dcon: the 'tuan-dcon' container WebElement.
    :param content: dict mutated in place with keys 展会时间 / 展会地点 / 组织机构.
    """
    # BUG FIX: this local was previously named `time`, shadowing the imported
    # `time` module for the rest of the function body.
    time_dl = tuan_dcon.find_element(by=By.XPATH, value=".//dl[1]")
    time_key_value = time_dl.text.split(":")
    # Strip the trailing "report an error" link text from the date value.
    content['展会时间'] = time_key_value[1].replace("纠错", "")
    place = tuan_dcon.find_element(by=By.XPATH, value=".//dl[2]")
    place_key_value = place.text.split(":")
    # Strip the map/route link captions glued onto the venue text.
    content['展会地点'] = place_key_value[1].replace("乘车路线", "").replace("全景地图", "")
    institution_str = tuan_dcon.find_element(by=By.XPATH, value=".//dl[3]//dd").text
    institutions = institution_str.split("\n")
    institution_dict = dict()
    # Each line is "role:organization" (host, organizer, etc.).
    for institution in institutions:
        institution_key_value = institution.split(":")
        institution_dict[institution_key_value[0]] = institution_key_value[1]
    content['组织机构'] = institution_dict
def parse_left_detail(left_ul, content):
    """Parse the left-hand sidebar list into *content* (industry and city)."""
    # Each <li> text carries a "label:value" prefix that we strip off.
    content['所属行业'] = left_ul.find_element(
        by=By.XPATH, value=".//li[1]"
    ).text.replace("所属行业:", "")
    content['展会城市'] = left_ul.find_element(
        by=By.XPATH, value=".//li[2]"
    ).text.replace("展会城市:", "")
def get_content(url):
    """Scrape one exhibition detail page.

    :param url: detail-page URL to visit.
    :return: dict with the 名称 key plus the fields filled in by the
        right-panel and left-sidebar parsers.
    """
    driver.get(url)
    time.sleep(1)  # give the page a moment to render
    # Start from the page title, then let each parser add its fields.
    content = {'名称': driver.find_element(by=By.ID, value="tuan-title").text}
    # Right-hand panel: time, place, organizers.
    tuan_dcon = driver.find_element(by=By.XPATH, value="//div[@class='tuan-dcon']")
    parse_right_detail(tuan_dcon, content)
    # Left-hand sidebar: industry and city.
    left_ul = driver.find_element(by=By.XPATH, value="//div[@class='tuan-dside']//ul")
    parse_left_detail(left_ul, content)
    return content
if __name__ == '__main__':
    try:
        urls = get_url()
        res = []
        # Visit every exhibition detail page collected from the index.
        for url in urls:
            try:
                res.append(get_content(url))
            except Exception as e:
                # Best-effort scraping: log the failing URL and keep going.
                print(url, e)
            time.sleep(2)  # be polite to the server between requests
        # Persist the collected records as pretty-printed UTF-8 JSON.
        with open("./data.json", "w", encoding="utf-8") as f:
            json.dump(res, f, indent=2, ensure_ascii=False)
    finally:
        # BUG FIX: quit() terminates the whole webdriver session, whereas
        # close() only closes the current window and leaves the driver
        # process running; the finally block guarantees cleanup even when
        # scraping or the file write raises.
        driver.quit()
# Python爬虫数据JSON格式 (blog-article residue from the original paste;
# 最新推荐文章于 2024-03-02 03:34:00 发布 — kept as comments so the file parses)