Most websites today have some kind of anti-crawler mechanism, and the most common one is banning crawlers by IP address. So when a large number of requests has to be made, the requests need to go through proxies.
The crawler
The site crawled here is www.xicidaili.com.
This site groups proxy IPs by type, e.g. HTTP or HTTPS.
For example, the URL for HTTP proxies is http://www.xicidaili.com/wt/.
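Once a proxy IP has been scraped, it is typically used by attaching it to outgoing requests. As a point of reference (not part of this project), here is a minimal sketch of how Scrapy's built-in HttpProxyMiddleware picks up a proxy from request.meta['proxy']; the spider name and the proxy address are made up for illustration.

import scrapy


class ProxyDemoSpider(scrapy.Spider):
    # hypothetical spider, only to show how a scraped proxy would be consumed
    name = 'proxy_demo'

    def start_requests(self):
        yield scrapy.Request(
            'http://httpbin.org/ip',
            # HttpProxyMiddleware routes the request through this proxy;
            # '1.2.3.4:8080' is a placeholder ip:port taken from the scraped list
            meta={'proxy': 'http://1.2.3.4:8080'},
            callback=self.parse,
        )

    def parse(self, response):
        # httpbin.org/ip echoes the caller's IP, so a working proxy's IP shows up here
        self.logger.info(response.text)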
Defining the item container
import scrapy


class GetproxyItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    ip = scrapy.Field()        # proxy IP address
    port = scrapy.Field()      # proxy port
    protocol = scrapy.Field()  # HTTP or HTTPS
    delay = scrapy.Field()     # response delay in seconds
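A small item pipeline can optionally be added to drop duplicate proxies before they reach the output. This is not part of the original project, just a sketch assuming it lives in getProxy/pipelines.py and is enabled through ITEM_PIPELINES in settings.py.

from scrapy.exceptions import DropItem


class DedupProxyPipeline:
    # drops items whose ip:port pair has already been seen during the crawl
    def __init__(self):
        self.seen = set()

    def process_item(self, item, spider):
        key = (item['ip'], item['port'])
        if key in self.seen:
            raise DropItem('duplicate proxy %s:%s' % key)
        self.seen.add(key)
        return item

To enable it, settings.py would additionally contain something like ITEM_PIPELINES = {'getProxy.pipelines.DedupProxyPipeline': 300}.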
The spider
import scrapy
from bs4 import BeautifulSoup           # BeautifulSoup is imported from the bs4 package
from scrapy.http import Request
from getProxy.items import GetproxyItem
from getProxy.settings import MAX_DELAY


class Myspider(scrapy.Spider):
    name = 'proxyip'
    bashurl = [
        # 'http://www.xicidaili.com/wn/',  # https proxies
        'http://www.xicidaili.com/wt/'     # http proxies
    ]

    def start_requests(self):
        # issue one request per listing page (http and/or https)
        for url in self.bashurl:
            yield Request(url, self.parse)

    def parse(self, response):
        # the second-to-last link in the pagination bar holds the last page number
        soup = BeautifulSoup(response.text, 'lxml')
        max_num = soup.find('div', class_='pagination').find_all('a')[-2].get_text()
        bashurl = str(response.url)
        for num in range(1, int(max_num) + 1):
            # page urls look like http://www.xicidaili.com/wt/1, /wt/2, ...
            url = bashurl + str(num)
            yield Request(url, callback=self.get_proxy)

    def get_proxy(self, response):
        # each proxy is one <tr> of the table with id="ip_list"; row 0 is the header
        proxys = BeautifulSoup(response.text, 'lxml').find('table', id='ip_list')
        for element in proxys.find_all('tr')[1:]:
            tds = element.find_all('td')
            item = GetproxyItem()
            item['ip'] = tds[1].get_text()
            item['port'] = tds[2].get_text()
            item['protocol'] = tds[5].get_text()
            # the speed column stores the delay in the div's title, e.g. "0.123秒";
            # strip the trailing "秒" (seconds) character
            item['delay'] = tds[6].find('div').attrs['title'][:-1]
            if float(item['delay']) > MAX_DELAY:
                continue
            yield item
Settings file (settings.py)
# filter out IPs whose delay is higher than 2 seconds
MAX_DELAY = 2.0
Run the spider with the command
scrapy crawl proxyip -o ../proxy_http.json
which saves the scraped IPs to the proxy_http.json file in the project's root directory.
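As a quick sanity check on the output (a sketch, not part of the project, assuming the requests library is installed), the saved JSON can be loaded and each proxy tested against a site that echoes the client IP:

import json
import requests

# proxy_http.json is the feed exported by "scrapy crawl proxyip -o ../proxy_http.json":
# a JSON list of objects with ip, port, protocol and delay fields
with open('proxy_http.json', encoding='utf-8') as f:
    proxies = json.load(f)

for p in proxies:
    address = 'http://{}:{}'.format(p['ip'], p['port'])
    try:
        # a proxy is considered alive if the request through it succeeds within 5 seconds
        r = requests.get('http://httpbin.org/ip',
                         proxies={'http': address}, timeout=5)
        print(address, 'OK', r.json())
    except requests.RequestException:
        print(address, 'failed')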