维普页面信息抓取

53 篇文章 1 订阅
18 篇文章 0 订阅
#!/usr/bin/python
#encoding=utf-8
__author__ = 'Administrator'
from  bs4 import  BeautifulSoup
from prettytable import PrettyTable
import selenium
import sys
import urllib
import requests
import time
import re
import  csv
if __name__ == "__main__":
    import os
    from selenium import webdriver
    from selenium.common.exceptions import NoSuchElementException

    # Launch Chrome via a local chromedriver binary.
    chromedriver = "/home/henson/Documents/pycharm/webdriver/chromedriver"
    os.environ["webdriver.chrome.driver"] = chromedriver
    driver = webdriver.Chrome(chromedriver)

    # Search-result page for "大气污染防治" (air-pollution prevention), 50 hits per page.
    driver.get('http://qikan.cqvip.com/zk/search.aspx?key=M%3d%E5%A4%A7%E6%B0%94%E6%B1%A1%E6%9F%93%E9%98%B2%E6%B2%BB&page=1&ids=&size=50#search-result-list')
    # Scroll AFTER the page is loaded — scrolling before get() has no effect.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Configure the preview table once, outside the loops.
    x = PrettyTable(["number", "title", "author", "fund"])
    x.border = False
    x.align["number"] = "l"
    x.valign["title"] = "t"
    x.padding_width = 10

    # "with" guarantees the CSV is flushed/closed even if scraping raises.
    with open("/home/henson/Desktop/001/758.csv", "a+", encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["title", "author", "cited", "fund"])

        for page in range(1, 300):          # result pages to walk
            for row in range(1, 51):        # up to 50 entries per page
                base = '//*[@id="body"]/div/div[3]/div[4]/dl[' + str(row) + ']'
                try:
                    title = driver.find_element_by_xpath(base + '/dt/a').text
                    author = driver.find_element_by_xpath(
                        base + '/dd[3]/span[@class="writer"]').text
                    cited = driver.find_element_by_xpath(
                        base + '/dt/span[@class="cited"]').text
                except NoSuchElementException:
                    # Fewer than 50 entries on this page — move to the next page.
                    break
                try:
                    fund = driver.find_element_by_xpath(
                        base + '/dd[4]/span[@class="fund"]').text
                except NoSuchElementException:
                    # Not every article lists a fund; record a blank instead of
                    # skipping the row (the old code dropped these rows entirely).
                    fund = ' '

                print(title)
                print(author)
                print(cited)
                print(fund)

                # '+' separators let Excel split the row into columns later.
                writer.writerows([(title, '+', author, '+', cited, '+', fund)])

                x.add_row([title, author.replace("\n", " "), cited,
                           fund.replace("\n", " ")])
                if row % 10 == 0:
                    print(x)  # periodic progress preview

            # Click "next page", then follow any newly opened window.
            now_handle = driver.current_window_handle   # current window handle
            all_handles = driver.window_handles         # all window handles
            driver.find_element_by_xpath(
                '//*[@id="body"]/div/div[3]/div[6]/div[2]/span[2]/a[2]').click()
            time.sleep(2)  # give the next page time to render
            for handle in all_handles:
                if handle != now_handle:
                    driver.switch_to.window(handle)

data = []
#data.append((title, author, cited, fund))
data.append((title, '+', author, '+', cited, '+', fund))  # use '+' (or another symbol) so Excel can split the field into columns
writer.writerows(data)

  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值