While running my crawler I hit this error: ValueError: Missing scheme in request url: /chl/cb85046d16433f71bdfb.html. Could someone take a look at what is going wrong?

# -*- coding: utf-8 -*-
import time

import scrapy
from scrapy.http import HtmlResponse
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

from ..items import CompanyItem, CompanyItemLoader


class FalvcrawlerSpider(scrapy.Spider):
    name = 'wxgov'
    allowed_domains = ['pkulaw.com']
    start_urls = ['http://www.pkulaw.com/']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # brower, wait and pagenumber are filled in from response.meta in parse_index
        self.brower = None
        self.wait = None
        self.pagenumber = None

    def start_requests(self):
        base_url = 'http://www.pkulaw.com/'
        # index_flag marks this as the initial index-page request so the downloader
        # middleware can handle login and fill in brower/wait/pagenumber via meta
        index_flag = {'index_flag': 'fetch index page', 'brower': None, 'wait': None, 'pagenumber': None}
        yield scrapy.Request(url=base_url, callback=self.parse_index, meta=index_flag, dont_filter=True)

    def parse_index(self, response):
        self.pagenumber = response.meta['pagenumber']
        # Take over the browser and wait objects created by the downloader middleware
        self.brower = response.meta['brower']
        self.wait = response.meta['wait']
        for url in self.parse_url(response):
            yield scrapy.Request(url=url, callback=self.parse_detail, dont_filter=True)
        # Page through the remaining list pages and parse each of them
        for pagenumber in range(2, int(self.pagenumber) + 1):
            response = self.next_page()
            if response is None:
                # next_page() returns None on a timeout; stop paging in that case
                break
            for url in self.parse_url(response):
                yield scrapy.Request(url=url, callback=self.parse_detail, dont_filter=True)

    def next_page(self):
        """
        Use selenium to click through to the next list page.
        :return: HtmlResponse built from the freshly rendered page, or None on timeout
        """
        try:
            # Locate the "next page" button in the pager
            next_page_button = self.wait.until(EC.presence_of_element_located((
                By.XPATH, '//*[@id="rightContent"]/div[2]/div/div[3]/ul/li[11]/a'
            )))
            next_page_button.click()
            self.wait.until(EC.visibility_of_all_elements_located((By.XPATH, '//*[@id="rightContent"]/div[2]/div/ul')))
            # Throttle the paging speed
            time.sleep(2)
            body = self.brower.page_source
            response = HtmlResponse(url=self.brower.current_url, body=body, encoding='utf-8')
            return response
        except TimeoutException:
            return None

    @staticmethod
    def parse_url(response):
        """
        Extract the URL of every regulation listed on one list page.
        :param response: list-page response
        :return: list of detail-page URLs found on that page
        """
        url_selector = response.xpath('//*[@id="rightContent"]/div[2]/div/ul/li')
        url_list = []
        for selector in url_selector:
            # Use a relative XPath here; the original absolute path (".../li[1]/...")
            # always picked the first entry on the page
            url = selector.xpath('./div/div[1]/h4/a/@href').extract_first()
            if url:
                url_list.append(url)
        return url_list
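    # The hrefs collected above are site-relative paths such as
    # /chl/cb85046d16433f71bdfb.html, and scrapy.Request raises exactly the
    # "ValueError: Missing scheme in request url" from the question when it is given a
    # URL without http://. Below is a minimal sketch of one possible fix (the name
    # parse_url_absolute is made up for illustration, and the XPaths are the same
    # assumptions as above): resolve each href against the page URL with
    # response.urljoin before yielding a request for it.
    @staticmethod
    def parse_url_absolute(response):
        urls = []
        for selector in response.xpath('//*[@id="rightContent"]/div[2]/div/ul/li'):
            href = selector.xpath('./div/div[1]/h4/a/@href').extract_first()
            if href:
                # '/chl/xxx.html' -> 'http://www.pkulaw.com/chl/xxx.html'
                urls.append(response.urljoin(href))
        return urls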

    def parse_detail(self, response):
        """
        Parse the detail page of a single regulation into a CompanyItem.
        :param response: detail-page HtmlResponse
        :return:
        """
        item_loader = CompanyItemLoader(item=CompanyItem(), response=response)
        item_loader.add_xpath('lav_title', '//*[@id="gridleft"]/div/div[1]/h2')
        item_loader.add_xpath('lav_Publishing_department', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[1]')
        item_loader.add_xpath('lav_President_Order', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[2]')
        item_loader.add_xpath('lav_pubdatetime', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[3]')
        item_loader.add_xpath('lav_activetime', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[4]')
        item_loader.add_xpath('lav_isvalid', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[5]')
        item_loader.add_xpath('lav_Levelof_effectiveness', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[6]')
        item_loader.add_xpath('lav_Regulations_category', '//*[@id="gridleft"]/div/div[1]/div[2]/ul/li[7]')
        item_loader.add_xpath('lav_content', '//*[@id="divFullText"]')
        item = item_loader.load_item()
        yield item

from scrapy import Item, Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose


class CompanyItemLoader(ItemLoader):
    # Only keep the first extracted value for every field
    default_output_processor = TakeFirst()

class CompanyItem(Item):
    # Title
    lav_title = Field(
        input_processor=MapCompose(lambda x: x.replace(' ', ''), lambda x: x.strip())
    )
    # Issuing department
    lav_Publishing_department = Field(
        input_processor=MapCompose(lambda x: x.replace(' ', ''), lambda x: x.strip())
    )
    # Document number (e.g. presidential order number)
    lav_President_Order = Field(
        input_processor=MapCompose(lambda x: x.replace(' ', ''), lambda x: x.strip())
    )
    # Publication date
    lav_pubdatetime = Field(
        input_processor=MapCompose(lambda x: x.replace('\xa0 ', '').strip(), lambda x: x[:-6])
    )
    # Effective date
    lav_activetime = Field(
        input_processor=MapCompose(lambda x: x.replace('\xa0 ', '').strip(), lambda x: x[:-6])
    )
    # Validity status
    lav_isvalid = Field(
        input_processor=MapCompose(lambda x: x.replace(' ', ''), lambda x: x.strip())
    )
    # Level of legal effect
    lav_Levelof_effectiveness = Field(
        input_processor=MapCompose(lambda x: x.replace(' ', ''), lambda x: x.strip())
    )
    # Regulation category
    lav_Regulations_category = Field(
        input_processor=MapCompose(lambda x: x.replace(' ', ''), lambda x: x.strip())
    )

    # Full text of the regulation
    lav_content = Field(
        input_processor=MapCompose(lambda x: x.replace('\xa0\xa0\xa0\xa0', '').replace('\xa0', ''),
                                   lambda x: x.replace('\n', '').replace(' ', ''))
    )
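
# A quick, self-contained check (not part of the project files; the title string is just
# an example value) of how these processors behave: MapCompose runs the lambdas on each
# extracted value and TakeFirst keeps the first result, so stray spaces are stripped.
if __name__ == '__main__':
    loader = CompanyItemLoader(item=CompanyItem())
    loader.add_value('lav_title', '  中华人民共和国 电子商务法  ')
    print(loader.load_item())   # {'lav_title': '中华人民共和国电子商务法'}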




import os
import json
import time
import base64
from scrapy.http import HtmlResponse
from selenium.webdriver import Chrome
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from fake_useragent import UserAgent


class FalvcrawlerDownloaderMiddleware(object):
    def __init__(self, username, password):
        # Account user name
        self.username = username
        # Account password
        self.password = password
        # Initialize the Chrome browser
        self.brower = Chrome()
        # Maximize the browser window
        self.brower.maximize_window()
        # Explicit wait with a 5 second timeout
        self.wait = WebDriverWait(self.brower, 5)

    @classmethod
    def from_crawler(cls, crawler):
        """
        Pull the user name and password out of settings.py.
        :param crawler:
        :return:
        """
        return cls(
            username=crawler.settings.get('USERNAME'),
            password=crawler.settings.get('PASSWORD')
        )

    def is_logined(self, request, spider):
        """
        :param request: the initial request, whose meta carries index_flag
        :param spider:
        :return: True if already logged in, otherwise False
        """
        self.brower.get(request.url)
        try:
            login_status = self.wait.until(
                EC.presence_of_element_located((By.XPATH, '//*[@id="newloginbtn"]')))
            # If the top-right corner still shows "登录", the user is not logged in yet
            if login_status.text == '登录':
                return False
            else:
                return True
        except TimeoutException as e:
            # On a repeated request the login element may not appear; treat it as not logged in
            spider.logger.info('Locate Username Element Failed: %s' % e.msg)
            return False

    def login_lagou(self, spider):
        """
        Log in with selenium and save the cookies of the successful login to a local file.
        :param spider:
        :return:
        """
        try:
            # Give the page a moment to render, otherwise the login elements may not be found
            time.sleep(2)
            # Open the login form
            login_status = self.wait.until(EC.presence_of_element_located(
                (By.XPATH, '//*[@id="newloginbtn"]')))
            login_status.click()
            # Fill in the user name
            username = self.wait.until(
                EC.visibility_of_element_located((By.XPATH, '//*[@id="inputUserName"]')))
            username.send_keys(self.username)
            # Fill in the password
            password = self.wait.until(
                EC.visibility_of_element_located((By.XPATH, '//*[@id="inputPwd"]')))
            password.send_keys(self.password)
            # Click the login button
            submit_button = self.wait.until(
                EC.visibility_of_element_located((By.XPATH, '//*[@id="loginByUserName"]')))
            submit_button.click()
            # Grab the cookies of the logged-in session
            cookies = self.brower.get_cookies()
            # Persist them for later runs
            self.save_cookies(cookies)
        except TimeoutException as e:
            spider.logger.info('Locate Login Element Failed: %s' % e.msg)

    @staticmethod
    def save_cookies(cookies):
        """
        After a successful login, save the cookies to a local file for later runs.
        :param cookies:
        :return:
        """
        path = os.getcwd() + '/cookies/'
        if not os.path.exists(path):
            os.mkdir(path)
        with open(path + 'falv.txt', 'w') as f:
            f.write(json.dumps(cookies))

    def fetch_index_page(self, request, spider):
        """
        Switch to the ungrouped ("不分组") list view and return the rendered page as an
        HtmlResponse. If the page does not navigate successfully, the element lookups below
        fail and the response would be empty, so the original request is returned instead
        to be retried.
        :param request:
        :param spider:
        :return:
        """
        try:
            # Click the "不分组" link to jump to the full, ungrouped result list
            keywords_more = self.wait.until(EC.element_to_be_clickable(
                (By.XPATH, '//*[@id="leftContent"]/div[1]/div[3]/ul/li[1]/a')))
            keywords_more.click()
            self.wait.until(EC.visibility_of_all_elements_located((By.XPATH, '//*[@id="rightContent"]/div[2]/div')))
            pagenumber = self.wait.until(EC.presence_of_element_located((
                By.XPATH,
                '//*[@id="rightContent"]/div[2]/div/div[3]/ul/li[10]/a'
            )))
            # Total number of pages, passed through response.meta for the paging loop in parse_index
            request.meta['pagenumber'] = pagenumber.text
            # Hand the browser and wait objects to the spider the same way
            request.meta['brower'] = self.brower
            request.meta['wait'] = self.wait
            body = self.brower.page_source
            # Return the rendered first list page; parse_index takes it from here
            response = HtmlResponse(
                url=self.brower.current_url,
                body=body,
                encoding='utf-8',
                request=request
            )
            return response
        except TimeoutException:
            spider.logger.info('Locate Index Element Failed And Use Proxy Request Again')
            # The page was likely redirected instead of navigating normally; re-issue the request
            return request

    def load_cookies(self, path):
        """
        Load the locally saved cookies file so the site can be visited without logging in again.
        :param path: path of the local cookies file
        :return:
        """
        with open(path, 'r') as f:
            cookies = json.loads(f.read())
            for cookie in cookies:
                cookies_dict = {'name': cookie['name'], 'value': cookie['value']}
                self.brower.add_cookie(cookies_dict)

    def process_request(self, request, spider):
        """
        Core middleware hook; every request passes through it. Only the initial request
        marked with index_flag gets the login / cookie handling and is answered with the
        Selenium-rendered index page; detail-page requests are left untouched.
        :param request:
        :param spider:
        :return:
        """
        # Only intercept the initial login / index-page request
        if 'index_flag' in request.meta.keys():
            # If not logged in yet, reuse a saved cookies file when one exists, otherwise log in
            if not self.is_logined(request, spider):
                path = os.getcwd() + '/cookies/falv.txt'
                if os.path.exists(path):
                    self.load_cookies(path)
                else:
                    self.login_lagou(spider)
            # Always answer this request with the rendered index page, so that parse_index
            # finds pagenumber/brower/wait in response.meta even when already logged in
            response = self.fetch_index_page(request, spider)
            return response

class RandomUserAgentMiddleware(object):
    """
    Attach a random User-Agent to every request.
    """
    def __init__(self, ua_type=None):
        super(RandomUserAgentMiddleware, self).__init__()
        self.ua = UserAgent()
        self.ua_type = ua_type

    @classmethod
    def from_crawler(cls, crawler):
        """
        Read RANDOM_UA_TYPE from settings.py, falling back to 'random' if it is not set.
        :param crawler:
        :return:
        """
        return cls(
            ua_type=crawler.settings.get('RANDOM_UA_TYPE', 'random')
        )

    def process_request(self, request, spider):
        """
        Core method of the middleware. getattr(A, B) is equivalent to A.B, i.e. it reads
        attribute B of object A, which here amounts to ua.random.
        :param request:
        :param spider:
        :return:
        """
        request.headers.setdefault('User-Agent', getattr(self.ua, self.ua_type))
        # Disable redirects for every request
        request.meta['dont_redirect'] = True
        request.meta['handle_httpstatus_list'] = [301, 302]
        spider.logger.debug('The <{}> User Agent Is: {}'.format(request.url, getattr(self.ua, self.ua_type)))

class AbuYunProxyMiddleware(object):
    """
    Route requests through the AbuYun proxy service. The dynamic-IP tunnel allows at most
    5 requests per second, so a download delay has to be configured in settings.py.
    """
    def __init__(self, settings):
        self.proxy_server = settings.get('PROXY_SERVER')
        self.proxy_user = settings.get('PROXY_USER')
        self.proxy_pass = settings.get('PROXY_PASS')
        self.proxy_authorization = 'Basic ' + base64.urlsafe_b64encode(
            bytes((self.proxy_user + ':' + self.proxy_pass), 'ascii')).decode('utf8')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            settings=crawler.settings
        )

    def process_request(self, request, spider):
        request.meta['proxy'] = self.proxy_server
        request.headers['Proxy-Authorization'] = self.proxy_authorization
        spider.logger.debug('The {} Use AbuProxy'.format(request.url))
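
# A small standalone check (run directly; the user/password values here are made-up
# placeholders, not the real credentials) of what the Proxy-Authorization header built
# above looks like: "Basic " plus the base64 of "user:pass", which is the standard HTTP
# Basic auth format that a proxy tunnel expects.
if __name__ == '__main__':
    demo_user, demo_pass = 'user', 'pass'
    demo_auth = 'Basic ' + base64.urlsafe_b64encode(
        bytes(demo_user + ':' + demo_pass, 'ascii')).decode('utf8')
    print(demo_auth)   # Basic dXNlcjpwYXNz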



# -*- coding: utf-8 -*-
# Define your item pipelines here
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# This pipeline writes the scraped regulation items into MySQL.
import pymysql
class FalvcrawlerPipelines(object):
    def open_spider(self, spider):
        # Open the database connection when the spider starts
        self.db = pymysql.connect(host='localhost', user='root', password=' ', db='autotext', charset='utf8')
        # Get a cursor for executing statements
        self.cursor = self.db.cursor()

    def process_item(self, item, spider):
        # SQL insert statement; note that pymysql uses %s placeholders, not ?
        sql = ("INSERT INTO itext(lav_title, lav_regulations_category, lav_isvalid, lav_Levelof_effectiveness, "
               "lav_President_Order, lav_Publishing_department, lav_pubdatetime, lav_activetime, lav_content, lav_url) "
               "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        values = (
            item.get('lav_title'), item.get('lav_Regulations_category'), item.get('lav_isvalid'),
            item.get('lav_Levelof_effectiveness'), item.get('lav_President_Order'),
            item.get('lav_Publishing_department'), item.get('lav_pubdatetime'),
            item.get('lav_activetime'), item.get('lav_content'),
            # lav_url is not defined on CompanyItem, so this stays None unless the field is added
            item.get('lav_url'),
        )
        try:
            # Execute the insert and commit
            self.cursor.execute(sql, values)
            self.db.commit()
        except Exception:
            # Roll back on error
            self.db.rollback()
        return item

    def close_spider(self, spider):
        # Close the database connection when the spider finishes
        self.db.close()
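
# A hedged sketch of creating the itext table the pipeline above inserts into. The column
# names come from the INSERT statement, but the column types are guesses, not taken from
# the original project; adjust them to your data before using this.
if __name__ == '__main__':
    ddl = (
        "CREATE TABLE IF NOT EXISTS itext ("
        "  id INT AUTO_INCREMENT PRIMARY KEY,"
        "  lav_title VARCHAR(255), lav_regulations_category VARCHAR(255),"
        "  lav_isvalid VARCHAR(64), lav_Levelof_effectiveness VARCHAR(64),"
        "  lav_President_Order VARCHAR(255), lav_Publishing_department VARCHAR(255),"
        "  lav_pubdatetime VARCHAR(64), lav_activetime VARCHAR(64),"
        "  lav_content TEXT, lav_url VARCHAR(255)"
        ") DEFAULT CHARSET=utf8"
    )
    conn = pymysql.connect(host='localhost', user='root', password=' ', db='autotext', charset='utf8')
    try:
        with conn.cursor() as cur:
            cur.execute(ddl)
        conn.commit()
    finally:
        conn.close()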

# -*- coding: utf-8 -*-
# Scrapy settings for wxgovScrapy project
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wxgovScrapy'
SPIDER_MODULES = ['wxgovScrapy.spiders']
NEWSPIDER_MODULE = 'wxgovScrapy.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wxgovScrapy (+http://www.pkulaw.cn)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Account user name
USERNAME = '1809****0048'
# Account password
PASSWORD = 'djy521'

PROXY_SERVER = "https://www.huaweicloud.com"
# AbuYun proxy tunnel credentials, obtained after registering and buying the service
PROXY_USER = 'Administrator'
PROXY_PASS = 'DJYdb521'

# Enable the AutoThrottle extension
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 0.2  # initial download delay


MYSQL_HOST = '127.0.0.1'
MYSQL_USER = 'root'

# Password of your own database
MYSQL_PASSWORD = ''
MYSQL_PORT = 3306

# Name of your own database
MYSQL_DB = 'autotext'
CHARSET = 'utf8'



# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Host': 'www.pkulaw.cn',
    'Referer': 'http://www.pkulaw.cn/',
    'Connection': 'keep-alive'
}

# Pick a random User-Agent for every request
RANDOM_UA_TYPE = 'random'

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'wxgovScrapy.middlewares.RandomUserAgentMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'wxgovScrapy.middlewares.AbuYunProxyMiddleware': 1,
    'wxgovScrapy.middlewares.RandomUserAgentMiddleware': 542,
    # Disable the UserAgentMiddleware that the framework enables by default
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'wxgovScrapy.middlewares.FalvcrawlerDownloaderMiddleware': 543,
}

#Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'wxgovScrapy.pipelines.FalvcrawlerPipelines': 300,
}

MY_USER_AGENT = ["Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
                 ,"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
                 ,"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/58.0"
                 ,"Mozilla/5.0+(Windows+NT+5.1)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/28.0.1500.95+Safari/537.36+SE+2.X+MetaSr+1.0"
                 ,"Mozilla/5.0+(Windows+NT+6.1;+WOW64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/50.0.2657.3+Safari/537.36"]



# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}



# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
