维普__获取页面简介

53 篇文章 1 订阅
18 篇文章 0 订阅
#!/usr/bin/python
#encoding=utf-8
__author__ = 'Administrator'
from  bs4 import  BeautifulSoup
import selenium
import sys
import urllib
import requests
import time
import re
import  csv
if __name__ == "__main__":
    import os
    from selenium import webdriver
    from selenium.webdriver.support.ui import WebDriverWait

    # Scrape article metadata (title / author / citation count / fund) from
    # CQVIP (维普) search-result pages and append each record to a CSV file.

    chromedriver = "/home/henson/Documents/pycharm/webdriver/chromedriver"
    os.environ["webdriver.chrome.driver"] = chromedriver
    driver = webdriver.Chrome(chromedriver)
    driver.get('http://qikan.cqvip.com/zk/search.aspx?key=M%3d%E5%A4%A7%E6%B0%94%E6%B1%A1%E6%9F%93%E9%98%B2%E6%B2%BB&page=5&ids=')
    # Scroll once the page has loaded (scrolling before get() has no effect).
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # `with` guarantees the CSV file is flushed/closed even if scraping raises;
    # newline='' is the documented way to open a file for the csv module.
    with open("/home/henson/Documents/coding/pachong/vp_ms.csv", "a+",
              encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["title", "author", "cited", "fund"])
        for i in range(1, 300):      # result pages
            for j in range(1, 100):  # <dl> entries on the current page
                # Common prefix for the j-th result entry.
                base = '//*[@id="body"]/div/div[3]/div[4]/dl[' + str(j) + ']'
                try:
                    now_handle = driver.current_window_handle  # current window handle
                    # If a result opened a new window, scrape from that one.
                    for handle in driver.window_handles:
                        if handle != now_handle:
                            driver.switch_to.window(handle)
                    title = driver.find_element_by_xpath(base + '/dt/a').text
                    # NOTE: original XPaths like dd[3]/[@class="writer"] are
                    # syntactically invalid (missing node test); use *[@...].
                    author = driver.find_element_by_xpath(
                        base + '/dd[3]/*[@class="writer"]').text
                    cited = driver.find_element_by_xpath(
                        base + '/dt/*[@class="cited"]').text
                    fund = driver.find_element_by_xpath(
                        base + '/dd[4]/*[@class=""]').text
                    print(title)
                    print(author)
                    print(cited)
                    print(fund)
                    # writerow writes one record; writerows would split each
                    # string into single-character cells (original bug).
                    writer.writerow([title, author, cited, fund])
                except Exception:
                    try:
                        # Fallback XPaths for entries with slightly different
                        # markup (e.g. citation count inside a <span> link).
                        title = driver.find_element_by_xpath(base + '/dt/a').text
                        author = driver.find_element_by_xpath(
                            base + '/dd[3]/span[2]').text
                        cited = driver.find_element_by_xpath(
                            base + '/dt/span/a').text
                        # Use dl[j], not the hard-coded dl[9] of the original.
                        fund = driver.find_element_by_xpath(
                            base + '/dd[4]/span').text
                        print(title)
                        print(author)
                        print(cited)
                        print(fund)
                        # The original fallback scraped these values but never
                        # wrote them out; persist them like the main branch.
                        writer.writerow([title, author, cited, fund])
                    except Exception:
                        # Neither layout matched: assume no more entries on
                        # this page and move on to the next page.
                        break

            # Click "next page" (下一页) and give it time to load.
            driver.find_element_by_xpath(
                '//*[@id="body"]/div/div[3]/div[6]/div[2]/span[2]/a[2]').click()
            time.sleep(2)

思维的定势让我一直想着:要是数据缺失、Xpath 找不到对应元素时,能不能直接跳过,却从未认真观察分析页面本身的构造。其实稍加观察就能发现,很多东西都是有一定格式和规律的:即使内容为 null,它的标签格式也还在那里。所以完全没有必要去找什么"可以直接跳过的 Xpath"——即使没有文本内容,很多情况下它的标签结构是固定的,它一定都还在那里。后知后觉——

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值