Scrapy + Selenium: Intermediate and Advanced Usage

I. Scrapy

Scrape the music information from a web page.

Open a command window in the directory where the project should live and create one with: scrapy startproject <project name>
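For example, scrapy startproject testwangyi generates roughly the following layout (the testwangyi name matches the imports used in the steps below):

testwangyi/
    scrapy.cfg            # deployment configuration
    testwangyi/
        __init__.py
        items.py          # item definitions (step 1)
        middlewares.py    # spider / downloader middlewares (step 4)
        pipelines.py      # item pipelines (step 5)
        settings.py       # project settings (step 3)
        spiders/
            __init__.py   # the spider from step 2 goes in this folder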

Step 1: Define the item class in items.py

import scrapy
from scrapy import Field

class TestwangyiItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = Field()
    number = Field()
    music_name = Field()
    img_src = Field()
    music_src = Field()
    music_singer = Field()

Step 2: Create a new file under spiders/ and write the spider

from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from testwangyi.items import TestwangyiItem
import re

class music(CrawlSpider):
    name = 'wangyi'
    start_urls = ['http://xyq.163.com/download/down_music.html']

    def parse(self, response):   # parse() is called by default with the response of every start_urls entry
        # print(response.text)
        item = TestwangyiItem()    # the item class defined in items.py
        reg = re.compile('.*下载.*')
        selector = Selector(response)
        # get all <a> elements
        # all_a = selector.xpath("//a[@download]").re(reg)
        # all_a = selector.xpath("//a[@download]").extract()  # list of strings
        all_a = selector.css('td.tTitle ::text').extract()
        # all <a> under the table
        # all_a = selector.xpath("//table//tr").extract()
        for i in all_a:
            item['title'] = i
            yield item     # hand the populated item to the item pipeline
            # num = i.xpath('//td').extract()
            # print(num)
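The spider above only fills in title. Below is a minimal sketch of how the loop could also populate the other fields, assuming (as the commented-out selectors suggest) that each track is an <a> element carrying a download attribute whose href points at the audio file; the real markup may differ and the selectors would need adjusting.

        # inside parse(), as an alternative to the title-only loop above
        for number, a in enumerate(response.xpath('//a[@download]'), start=1):
            item = TestwangyiItem()
            item['number'] = number
            item['music_name'] = a.xpath('./text()').extract_first()
            item['music_src'] = response.urljoin(a.xpath('./@href').extract_first())
            yield item

The title, img_src and music_singer fields would be filled the same way once the matching selectors are known.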

Step 3: Configure settings.py (User-Agent, feed export, middlewares, pipeline)

############ User-Agent header
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3)' \
    ' AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'

FEED_URI = u'file:///D:/pycharm2017.3.2/work/scrapy 0608/doubanTU/douban.csv'
# location and name of the exported file
FEED_FORMAT = 'csv'
# format of the exported file

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# DOWNLOAD_DELAY = 3          # delay between consecutive requests ##############

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}

########## middlewares
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'testwangyi.middlewares.TestwangyiSpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'testwangyi.middlewares.TestwangyiDownloaderMiddleware': 543,
}

# Configure item pipelines (enable the pipeline so scraped items get saved)
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'testwangyi.pipelines.TestwangyiPipeline': 300,
}
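The downloader middleware in step 4 needs a pool of user agents and proxy addresses to pick from. One convenient (but optional) place to keep them is settings.py itself, under custom setting names. The names USER_AGENT_LIST and PROXY_LIST below are my own choice, not built-in Scrapy settings, and the values are placeholders:

# custom settings read by the downloader middleware (names are arbitrary)
USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5',
]
PROXY_LIST = [
    'http://127.0.0.1:8080',   # replace with real proxy addresses
]

A sketch of a middleware that reads these settings follows step 4.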

Step 4: Set the proxy and request headers in middlewares.py

In the second class (the downloader middleware), add the proxy IP and the User-Agent header. They have to be set on the request before it is sent, so the natural place is process_request rather than process_response.

from scrapy import signals
import random

class TestwangyiSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class TestwangyiDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        ######################## set a random User-Agent header
        user_agent_list = []     # fill with your own user-agent strings
        if user_agent_list:
            request.headers.setdefault('User-Agent', random.choice(user_agent_list))
        # set a proxy IP (handled by the built-in HttpProxyMiddleware)
        ipdaili = []             # fill with proxies such as 'http://ip:port'
        if ipdaili:
            request.meta['proxy'] = random.choice(ipdaili)

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
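If the pools are kept in settings.py (as in the USER_AGENT_LIST / PROXY_LIST sketch under step 3; those names are my own choice, not standard Scrapy settings), the middleware can read them instead of hard-coding lists. A minimal sketch:

import random

class RandomProxyUserAgentMiddleware(object):
    # a stripped-down downloader middleware that pulls its pools from settings.py
    def __init__(self, user_agents, proxies):
        self.user_agents = user_agents
        self.proxies = proxies

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            crawler.settings.getlist('USER_AGENT_LIST'),
            crawler.settings.getlist('PROXY_LIST'),
        )

    def process_request(self, request, spider):
        if self.user_agents:
            request.headers.setdefault('User-Agent', random.choice(self.user_agents))
        if self.proxies:
            request.meta['proxy'] = random.choice(self.proxies)
        return None

This class would live in the same middlewares.py and be registered in DOWNLOADER_MIDDLEWARES just like the generated TestwangyiDownloaderMiddleware.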

Step 5: Store the scraped content in pipelines.py

import openpyxl

class TestwangyiPipeline(object):
    def __init__(self):
        self.wb = openpyxl.Workbook()
        self.ws = self.wb.active    # the active worksheet
        self.ws.append(['标题'])    # header row

    def process_item(self, item, spider):
        # build a list so the value can be passed to append()
        line = [item["title"]]
        self.ws.append(line)
        # print(item)
        return item

    def close_spider(self, spider):
        # save once when the spider finishes, instead of on every item
        self.wb.save("music.xlsx")

Step 6: Create a main script to run the crawl

from scrapy import cmdline
cmdline.execute('scrapy crawl wangyi'.split())

II. Selenium

Installation

Pick a chromedriver build matching your Chrome version at http://chromedriver.storage.googleapis.com/index.html; after downloading, copy the executable into the browser's installation directory (or any directory on your PATH). Then install the Python package:
pip install selenium
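If chromedriver is not on the PATH, its location can also be passed explicitly when creating the driver. The path below is just an example; executable_path is the Selenium 3 style argument used throughout this article:

from selenium import webdriver

# point Selenium at the downloaded chromedriver directly (path is an example)
driver = webdriver.Chrome(executable_path=r'D:\tools\chromedriver.exe')
driver.get('https://www.baidu.com')
driver.quit()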

1. Open a web page

from selenium import webdriver
url = 'https://www.baidu.com'
driver = webdriver.Chrome()
driver.get(url)

2. Open a page and extract information from it

driver = webdriver.Chrome()
driver.find_elements_by_class_name()
driver.find_elements_by_tag_name()
driver.find_elements_by_id()
driver.switch_to.frame('<frame id or name>')

from selenium import webdriver
url="https://movie.douban.com/chart?qq-pf-to=pcqq.group"
driver=webdriver.Chrome()
# driver.maximize_window()
driver.get(url)

tables = driver.find_elements_by_tag_name("table")
tables.pop(0)   # the first table is not a movie entry
for i, v in enumerate(tables):
    pl2 = v.find_elements_by_class_name('pl2')[0]
    name = pl2.find_elements_by_tag_name("a")[0].text                # movie title
    num = pl2.find_elements_by_class_name('pl')[1].text              # number of ratings
    score = pl2.find_elements_by_class_name('rating_nums')[0].text   # rating score (class name as used on the Douban chart page)
    print(name)
    print(score)
    print(num)

3. Open Baidu, locate the search box, and search

from selenium import webdriver

url="http://www.baidu.com"
driver=webdriver.Chrome()
driver.maximize_window()    # maximize the window
driver.get(url)
element=driver.find_element_by_id('kw')
element.send_keys("python")
driver.find_element_by_id("su").click()

4. Open a page and locate elements by tag

from selenium import webdriver

driver=webdriver.Chrome()
driver.get('http://www.runoob.com/python3/python3-file-methods.html')
element = driver.find_element_by_tag_name("title")
target = driver.find_element_by_id("footer")

from selenium.webdriver import ActionChains
action_chains = ActionChains(driver)
action_chains.drag_and_drop(element, target).perform()

5. Scrolling the page

5.1 Method 1

# Similar to pressing PgDn on the keyboard; Keys.DOWN (the down arrow) also works,
# but that scrolls much more slowly
import time
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains

# `driver` is a webdriver.Chrome() instance that has already loaded a page
for i in range(6):
    ActionChains(driver).send_keys(Keys.PAGE_DOWN).perform()
    time.sleep(1)

time.sleep(3)
driver.close()

5.2 Method 2

Python can also simulate mouse and keyboard actions. Keep in mind that the text caret moved by the keyboard and the mouse pointer's position on the screen are two different things.

First, import at the top of the file:
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

# define a function
def Transfer_Clicks(browser):
    # run a piece of JavaScript to scroll back to the top of the page
    browser.execute_script("window.scrollBy(0,-document.body.scrollHeight)", "")
    try:
        inputs1 = browser.find_elements_by_class_name("feedAttr_transfer")
        for input1 in inputs1:
            try:
                # simulate a mouse click on input1; the mouse is now positioned over input1
                ActionChains(browser).click(input1).perform()
                # scroll down 200 pixels; the mouse position changes with the page
                browser.execute_script("window.scrollBy(0,200)", "")
                # move the mouse up 80 pixels; the horizontal position stays the same
                ActionChains(browser).move_by_offset(0, -80).perform()
                # left-click
                ActionChains(browser).click().perform()
                # simulate pressing TAB
                ActionChains(browser).key_down(Keys.TAB).perform()
                # simulate pressing ENTER
                ActionChains(browser).send_keys(Keys.ENTER).perform()
            except:
                pass
    except:
        pass
    return "Transfer successfully \n"

6. Switching between frames

driver.switch_to.frame(reference)    # switch into a child frame (by id, name, index, or WebElement)
driver.switch_to.parent_frame()      # switch back to the parent frame
driver.switch_to.default_content()   # switch back to the main document
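Frames are often injected after the page finishes loading, so switching can fail if it happens too early. A small sketch of waiting for a frame before switching, assuming driver is the webdriver.Chrome() instance from the examples above (the id 'g_iframe' matches the NetEase example in section IV; any locator works):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# wait up to 10 seconds for the frame to be available, then switch into it
WebDriverWait(driver, 10).until(
    EC.frame_to_be_available_and_switch_to_it((By.ID, 'g_iframe'))
)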

7. Handling Baidu account login

#-*-coding:utf-8-*-
import time
from selenium import webdriver
driver = webdriver.Chrome()
url = "https://www.baidu.com/"
driver.get(url)
# inspect the page and find the login link
# login = driver.find_element_by_id("u1").find_elements_by_class_name('lb')[0]
login = driver.find_elements_by_css_selector('div[id=u1] a[class=lb]')[0]
login.click()  # click the login link
## switch the login dialog to "log in with username"
time.sleep(5)
uernameLogin = driver.find_elements_by_css_selector('p.tang-pass-footerBarULogin')[0]
uernameLogin.click()
time.sleep(3)
# find the username and password inputs and fill them in
uername = driver.find_element_by_id('TANGRAM__PSP_10__userName')   # username input
uername.send_keys('your_phone_number')     # placeholder, use your own account
time.sleep(3)
password = driver.find_element_by_id('TANGRAM__PSP_10__password')  # password input
password.send_keys('your_password')        # placeholder, use your own password
time.sleep(3)
anniu = driver.find_element_by_id('TANGRAM__PSP_10__submit')       # login button
anniu.click()

If an image CAPTCHA needs to be handled, see https://www.cnblogs.com/mypath/articles/6646858.html

III. Advanced Scrapy (following the 51job "next page" link)

Create a new project: scrapy startproject <project name>

1. Define the item class in items.py

import scrapy
from scrapy import Field

class DoubiItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    jobname = Field()
    companyname = Field()
    address = Field()
    money = Field()
    releasetime = Field()

2. Write the spider

from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from doubi.items import DoubiItem
from scrapy.http import Request

class Job(CrawlSpider):
    name = "doubi"
    pageNumber = 1
    start_urls = ["https://search.51job.com/list/020000,000000,0000,00,9,99,%25E4%25BA%25BA%25E5%25B7%25A5%25E6%2599%25BA%25E8%2583%25BD,2,1.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=4&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="]

    def parse(self, response):
        Job.pageNumber += 1
        item = DoubiItem()              # the item class
        selector = Selector(response)   # page content
        Infos = selector.xpath('//div[@id="resultList"][1]//div[@class="el"]')  # one node per job listing
        for each in Infos:
            jobname = each.xpath("p/span/a/@title")[0].extract()
            companyname = each.xpath('span[@class="t2"]/a/@title')[0].extract()
            address = each.xpath('span[2]/text()')[0].extract()
            money = each.xpath('span[3]/text()')[0].extract()
            releasetime = each.xpath('span[4]/text()')[0].extract()

            # print(jobname, companyname, address, money, releasetime)
            item['jobname'] = jobname
            item['companyname'] = companyname
            item['address'] = address
            item['money'] = money
            item['releasetime'] = releasetime
            yield item

        nextLink = selector.xpath('//div[@id="resultList"][1]//li[@class="bk"][2]/a/@href').extract()[0]
        if Job.pageNumber < 5 and nextLink:
            yield Request(nextLink, callback=self.parse)  # follow the "next page" link

3. Set the download delay in settings.py

DOWNLOAD_DELAY = 3   # delay between consecutive requests, in seconds
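By default Scrapy also randomizes this delay (each wait is between 0.5x and 1.5x of DOWNLOAD_DELAY) so the crawl looks less mechanical; that behaviour is controlled by a separate setting:

DOWNLOAD_DELAY = 3
RANDOMIZE_DOWNLOAD_DELAY = True   # default; actual waits vary between 0.5 * 3 and 1.5 * 3 seconds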

4. Add a class in pipelines.py that saves the results to an Excel file

import xlwt

class ExcelDoubiPipeline(object):
    index = 0

    def __init__(self):
        self.wb = xlwt.Workbook()
        self.sheet = self.wb.add_sheet("jobdoubi")
        list2 = ["职位名", "公司名", "地点", "薪资", "发布时间"]
        for i, v in enumerate(list2):
            self.sheet.write(ExcelDoubiPipeline.index, i, v)

    def process_item(self, item, spider):
        ExcelDoubiPipeline.index += 1
        for i, v in enumerate(item.keys()):
            self.sheet.write(ExcelDoubiPipeline.index, i, item[v])
        return item

    def close_spider(self, spider):
        self.wb.save("jobdoubi2.xls")
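For this class to run, it has to be enabled in settings.py. The project module is doubi (as the imports above show), so the entry would look like this, where 300 is just a priority value:

ITEM_PIPELINES = {
    'doubi.pipelines.ExcelDoubiPipeline': 300,
}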

5. Create a main script to run the crawl

from scrapy import cmdline
cmdline.execute('scrapy crawl doubi'.split())

IV. Advanced Selenium Usage

1. Scrolling the page

#-*-coding:utf-8-*-
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
driver = webdriver.Chrome()
url="https://www.tmall.com"
driver.get(url)
driver.maximize_window()
for i in range(10):
    time.sleep(5)
    # driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    driver.execute_script("window.scrollBy(0,200)","")  #滚动多少
    # ActionChains(driver).send_keys(Keys.PAGE_DOWN).perform()

2. Log in to a Baidu account

The code is identical to section II, step 7 above (open baidu.com, click the login link, switch to username login, fill in the account and password, and click the login button).

3. A simple scrape of NetEase Cloud Music

from selenium import webdriver
driver = webdriver.Chrome()
url = 'https://music.163.com/#/discover/toplist'
driver.get(url)

# driver.switch_to.frame('g_iframe')  # the argument is the iframe's id or name
# if switching by id/name fails, get the frame WebElement first
# and pass it to driver.switch_to.frame()
myiframe = driver.find_element_by_id('g_iframe')
driver.switch_to.frame(myiframe)
parentDiv = driver.find_element_by_id('song-list-pre-cache')
table = parentDiv.find_elements_by_tag_name('table')[0]

tbody = table.find_elements_by_tag_name('tbody')[0]

trs = tbody.find_elements_by_tag_name('tr')

for each in trs:
    print(each.find_elements_by_tag_name('td')[0].text)
    print(each.find_elements_by_tag_name('td')[3].find_elements_by_tag_name('div')[0].get_attribute('title'))
# print(myiframe)