# Read ~200 company names from an Excel file and, for each one, use Selenium
# to search tianyancha.com in a real browser and scrape that company's details
# back into the same spreadsheet (pure browser automation, no HTTP scraping).
from time import sleep

import numpy as np
import pandas as pd
from lxml import etree
from openpyxl import load_workbook
from openpyxl import workbook
from selenium import webdriver
from selenium.webdriver.common.by import By
def input(name):
    """Look up *name* on Tianyancha and record its details in the workbook.

    Types the company name into the search box, opens the first result
    (which loads in a new tab), scrapes the credit code, legal
    representative, registered name and phone number, writes them to
    columns B-E of row ``s + 2`` in Sheet1, and saves the workbook.

    Relies on module-level globals set in ``__main__``:
    ``driver`` (WebDriver already on the Tianyancha home page),
    ``excelwb`` (openpyxl workbook) and ``s`` (current loop index).

    NOTE(review): this function shadows the builtin ``input``; the name is
    kept so the existing caller in ``__main__`` keeps working.
    """
    driver.maximize_window()
    # Enter the company name and submit the search.
    driver.find_element(By.XPATH, '//*[@id="page-container"]/div[1]/div/div[3]/div[2]/div[1]/div').send_keys(name)
    driver.find_element(By.XPATH, '//*[@id="page-container"]/div[1]/div/div[3]/div[2]/div[1]/button').click()
    sleep(2)  # let the result list render before clicking the first hit
    # Clicking a result opens the company detail page in a new tab.
    origin = driver.current_window_handle
    driver.find_element(By.CLASS_NAME, 'index_name__qEdWi').click()
    driver.switch_to.window(driver.window_handles[-1])
    sleep(2)  # let the detail page render before locating elements
    code = driver.find_element(By.CLASS_NAME, 'index_copy-box__UK5hM')
    print(code.text)  # progress output: unified social credit code
    people = driver.find_element(By.CLASS_NAME, 'index_link-click__9Yy_n')
    coname = driver.find_element(By.CLASS_NAME, 'index_company-name__MEKNQ')
    tel = driver.find_element(By.CLASS_NAME, 'index_detail-tel__cPPbr')
    ws = excelwb['Sheet1']
    row = s + 2  # row 1 holds headers, data starts at row 2
    ws.cell(row, 2).value = code.text
    ws.cell(row, 3).value = people.text
    ws.cell(row, 4).value = coname.text
    ws.cell(row, 5).value = tel.text
    # Save to the same absolute path the names were read from (the original
    # relative '数据.xlsx' wrote a copy into whatever the CWD happened to be).
    # Saving every iteration means progress survives a crash mid-run.
    excelwb.save(r"C:\Users\hcw_h\Desktop\AA\数据.xlsx")
    # Close the detail tab and return to the search tab so window handles
    # do not accumulate over 200 iterations.
    driver.close()
    driver.switch_to.window(origin)
if __name__ == "__main__":
df = pd.read_excel(r"C:\Users\hcw_h\Desktop\AA\数据.xlsx",usecols=[0],names=None)
df_lia = np.array(df.stack())
excelwb=load_workbook('数据.xlsx')
option = webdriver.ChromeOptions()
option.add_argument(r"user-data-dir=C:\Users\hcw_h\AppData\Local\Google\Chrome\User Data - 副本")
driver = webdriver.Chrome(options=option)
for s in range(198):
name=df_lia[s]
print(name)
driver.get("https://www.tianyancha.com/?jsid=SEM-BAIDU-PP-24H-000001&bd_vid=10463400191655164367&userid=31769301&query=%CC%EC%D1%DB%B2%E9&keywordid=275163844312&campaignid=154743647&groupid=5800886659")
try:
input(name)
except:
continue