from selenium import webdriver#导入库
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import csv,time
import os,re
import requests
import selenium.webdriver.support.ui as ui
import urllib
# --- Module-level Selenium setup -------------------------------------------
# Configure Chrome so that file downloads land in D:\pufa instead of the
# user's default download directory.
chromeOptions = webdriver.ChromeOptions()
prefs = {"download.default_directory": "D:\\pufa"}
chromeOptions.add_experimental_option("prefs", prefs)
# NOTE: the `chrome_options` keyword is deprecated; `options` is the
# supported spelling and works on both Selenium 3.x and 4.x.
browser = webdriver.Chrome(options=chromeOptions)
# Maps announcement title -> detail-page URL, filled by GainPage().
# (Duplicate titles overwrite earlier entries.)
positon = {}
def enterinfo():
    """Open cninfo's announcement search page and submit a query.

    Searches for announcements whose title contains '浦发银行' (SPD Bank)
    dated from 2020-01-01 through today.  Side effects only: drives the
    module-level ``browser`` instance; returns None.
    """
    url = 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/search'
    browser.get(url)  # open the search page
    # Fill in the title-keyword box (matched by its placeholder text).
    # Renamed from `input` to avoid shadowing the builtin.
    keyword_box = browser.find_element_by_css_selector('input[placeholder $= "标题关键字"]')
    keyword_box.send_keys('浦发银行')
    # Clear the site's preset date range before entering our own.
    browser.find_element_by_class_name("el-range__close-icon").click()
    start_box = browser.find_element_by_css_selector('input[placeholder $= "开始日期"]')
    start_box.send_keys('2020-01-01')
    end_box = browser.find_element_by_css_selector('input[placeholder $= "结束日期"]')
    today = time.strftime("%Y-%m-%d", time.localtime())  # e.g. "2024-05-01"
    end_box.send_keys(today)
    time.sleep(2)  # let the form settle before submitting
    # Click the search button (located by its full XPath).
    browser.find_elements_by_xpath('//*[@id="main"]/div[2]/div[1]/div[2]/div[1]/div[2]/div[1]/button/span')[0].click()
    time.sleep(2)  # wait for the first results page to render
def GainPage():
    """Scrape the currently displayed results page.

    Parses ``browser.page_source`` and records each announcement's
    title -> absolute detail-page URL into the module-level ``positon``
    dict.  Side effects only; returns None.
    """
    source = browser.page_source
    soup = BeautifulSoup(source, 'lxml')
    # The results table body; assumes exactly one such wrapper exists.
    table = soup.select('div.el-table__body-wrapper')[0]
    base = 'http://www.cninfo.com.cn'  # hoisted loop invariant
    for row in table.select('tr.el-table__row'):
        # Column 3 holds the announcement title link.
        link = row.select('td.el-table_1_column_3')[0].select('span.ahover')[0].select('a')[0]
        href = link.get('href')  # site-relative path
        positon[link.text] = base + href
    # Original paste had ambiguous indentation; a single sleep after the
    # whole page is parsed matches the per-page pacing used elsewhere.
    time.sleep(2)
# --- Main script -----------------------------------------------------------
enterinfo()

# Harvest links from result pages 1..8, clicking "next page" after each.
# (The original used `i = 1; while(i): ...` with a break at i == 9 —
# same eight iterations, written plainly here.)
page = 1
while True:
    GainPage()
    # Click the next-page arrow button.
    browser.find_elements_by_xpath('//*[@id="main"]/div[2]/div[1]/div[1]/div[3]/div/button[2]/i')[0].click()
    page += 1
    if page == 9:
        break

# Report what was collected.
print(len(positon))
for item in positon.items():
    print(item)

# Visit every detail page and click its download button; Chrome saves the
# file into the download directory configured at module level.
for url in positon.values():
    browser.get(url)
    browser.find_elements_by_xpath('//*[@id="noticeDetail"]/div/div[1]/div[3]/div[1]/button/span')[0].click()
# Source: CSDN article "【Python爬虫】使用Selenium爬取指定上市公司(如浦发银行)的今年公告信息"
# (article timestamp: 2024-05-01 01:51:49) — attribution text was pasted in
# as bare lines and has been converted to a comment so the file parses.