Recently a teacher from the library handed me a scraping task; it ended up taking eight or nine hours.
The page to scrape looked like this: the titles and other information, plus the items under Coverage. At first I assumed everything had to be clicked open, but it turns out that's unnecessary: the hidden elements are already in the DOM and can be located directly.
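In miniature, the trick looks like this (a sketch of the approach used in the full script below; the `hidden` class name is just what this particular page happens to use):

```python
from selenium import webdriver

driver = webdriver.Firefox()
driver.get('http://mjl.clarivate.com/cgi-bin/jrnlst/jlsubcatg.cgi?PC=H')

# The Coverage entries are already in the DOM, just not displayed, so no
# clicking is needed. Selenium's .text is empty for invisible elements,
# but the raw textContent attribute comes back regardless of visibility.
for item in driver.find_elements_by_class_name('hidden'):
    print(item.get_attribute('textContent'))
```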
```python
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import xlwt
import re

# version 1.1
def spiderfin(p):
    """Scrape all result pages of one category into an Excel file named after it."""
    WebDriverWait(driver, 40, 0.5).until(EC.presence_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div[1]/div/div/form/div[1]/p[2]/input[1]")))
    # total number of entries, read from the count shown on the page
    page = driver.find_element_by_xpath(
        '/html/body/div[2]/div/div[1]/div/div/form/div[1]/p[1]/strong').text
    page = int(page)
    # number of page turns needed (10 entries per page)
    time = int(page / 10)
    number = 0

    def spiderone(number):
        """Write the entries of the current result page into the worksheet."""
        WebDriverWait(driver, 40, 0.5).until(EC.presence_of_element_located(
            (By.XPATH, "/html/body/div[2]/div/div[1]/div/div/form/ul")))
        # the Coverage entries are hidden elements; no clicking needed
        books = driver.find_elements_by_class_name('hidden')
        information = driver.find_element_by_xpath(
            '/html/body/div[2]/div/div[1]/div/div/form/ul').text
        information = re.sub('Coverage', '', information)
        ilist = re.split('\n', information)
        while "" in ilist:
            ilist.remove("")
        lenline = len(books)
        for i in range(1, lenline + 1):
            # three visible lines per entry go into the first three columns
            worksheet.write(number + i - 1, 0, ilist[3 * i - 3])
            worksheet.write(number + i - 1, 1, ilist[3 * i - 2])
            worksheet.write(number + i - 1, 2, ilist[3 * i - 1])
            # .text would be empty on hidden elements, so read textContent
            name = books[i - 1].get_attribute('textContent')
            name = re.split("\n+", name)
            while "" in name:
                name.remove("")
            worksheet.write(number + i - 1, 3, ';'.join(name))
        # name of the Excel file: one file per category
        excelname = 'd:\\' + p + '.xls'
        workbook.save(excelname)

    workbook = xlwt.Workbook(encoding='ascii')
    worksheet = workbook.add_sheet('My Worksheet')
    spiderone(number)
    for i in range(time):
        # loop `time` times; unlike MATLAB, range starts at 0,
        # so this iteration is on page i+2
        WebDriverWait(driver, 40, 0.5).until(EC.presence_of_element_located(
            (By.XPATH, "/html/body/div[2]/div/div[1]/div/div/form/div[1]/p[2]/input[1]")))
        driver.find_element_by_xpath(
            "//input[@class='float_left' and @name='NEXT']").click()
        number = number + 10
        spiderone(number)

# collect the category names from the dropdown, then close the browser
driver = webdriver.Firefox()
link = 'http://mjl.clarivate.com/cgi-bin/jrnlst/jlsubcatg.cgi?PC=H'
driver.get(link)
WebDriverWait(driver, 40, 0.5).until(EC.presence_of_element_located(
    (By.XPATH, "/html/body/div[2]/div/div[1]/div/form/p[3]/select")))
pages = driver.find_elements_by_tag_name('option')
pages.pop()  # drop the last option
ps = []
for page in pages:
    ps.append(page.text)
driver.close()

# scrape each category in a fresh browser session
for p in ps:
    driver = webdriver.Firefox()
    driver.get(link)
    WebDriverWait(driver, 40, 0.5).until(EC.presence_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div[1]/div/form/p[3]/select")))
    selector = Select(driver.find_element_by_xpath(
        "/html/body/div[2]/div/div[1]/div/form/p[3]/select"))
    selector.select_by_visible_text(p)
    driver.find_element_by_xpath(
        "/html/body/div[2]/div/div[1]/div/form/p[3]/input[1]").click()
    spiderfin(p)
    driver.close()
```
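One caveat if anyone runs this today: the `find_element_by_*` helpers are the old Selenium 3 API and were removed in Selenium 4. The modern equivalents take a `By` locator instead:

```python
from selenium.webdriver.common.by import By

# Selenium 4 style: one generic method plus a By locator
books = driver.find_elements(By.CLASS_NAME, 'hidden')
nxt = driver.find_element(By.XPATH, "//input[@class='float_left' and @name='NEXT']")
```

Also note that the script opens a fresh Firefox for every category and saves one `.xls` per category under `d:\`, which is slow but keeps each run independent of the last.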
I'm a little embarrassed to have fallen back on dumb, dead-slow Selenium again. But there was no speed requirement anyway, and when JavaScript gets involved it's about the only thing that just works, so I took the lazy route.
Things I learned from this job:

- the difference between `find_element` and `find_elements` (see the sketch after this list)
- writing results out to a file
- looping through result pages
- handling dropdown select boxes (also in the sketch below)
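The first and last of those in miniature (a standalone sketch in the Selenium 4 style; the URL is a placeholder, not the journal-list page):

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException

driver = webdriver.Firefox()
driver.get('http://example.com')  # placeholder URL; any page with a <select> works

# find_elements returns a (possibly empty) list and never raises,
# good for "grab whatever is there", like the Coverage entries above
options = driver.find_elements(By.TAG_NAME, 'option')
print(f'{len(options)} option elements found')

# find_element returns exactly one WebElement or raises NoSuchElementException
try:
    box = driver.find_element(By.TAG_NAME, 'select')
    # Select wraps a <select> WebElement and gives a proper dropdown API
    Select(box).select_by_index(0)  # select_by_visible_text also works
except NoSuchElementException:
    print('no dropdown on this page')

driver.quit()
```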