items.py — defines the fields of the content to be scraped.
import scrapy
class DailiItem (scrapy.Item) :
    """Item holding one proxy-server row scraped from xicidaili.com."""
    country = scrapy.Field()        # URL of the country flag image (td[1]/img/@src)
    agent_ip = scrapy.Field()       # proxy IP address
    agent_port = scrapy.Field()     # proxy port
    agent_addr = scrapy.Field()     # presumably the server's location — verify against site
    anonymity = scrapy.Field()      # anonymity level column
    agent_type = scrapy.Field()     # presumably protocol type (HTTP/HTTPS) — verify against site
    survival_time = scrapy.Field()  # how long the proxy has been alive
    verify_time = scrapy.Field()    # last verification timestamp
dailiSpider.py — the spider module.
import scrapy
from urllib import parse
from daili.items import DailiItem
class DailispiderSpider (scrapy.Spider) :
    """Spider that crawls the proxy list on xicidaili.com.

    Data rows (``<tr>`` with class ``''`` or ``'odd'``) each yield one
    DailiItem; rows without a class attribute carry pagination links in
    ``<th><a href>`` and are followed recursively through ``parse``.
    """
    name = 'dailiSpider'
    allowed_domains = ['xicidaili.com' ]
    start_urls = ['http://www.xicidaili.com/' ]

    def parse(self, response):
        """Extract proxy items from the ip_list table and follow page links."""
        base_url = 'http://www.xicidaili.com/'
        tr_list = response.xpath('//*[@id="ip_list"]//tr' )
        for tr in tr_list:
            tr_class = tr.xpath('@class' ).extract_first()
            if tr_class == '' or tr_class == 'odd' :
                # BUG FIX: create a fresh item for every row.  The original
                # built a single DailiItem before the loop and re-yielded it,
                # so all yielded items shared (and overwrote) the same state.
                item = DailiItem()
                item['country' ] = tr.xpath('td[1]/img/@src' ).extract_first()
                item['agent_ip' ] = tr.xpath('td[2]/text()' ).extract_first()
                item['agent_port' ] = tr.xpath('td[3]/text()' ).extract_first()
                item['agent_addr' ] = tr.xpath('td[4]/text()' ).extract_first()
                item['anonymity' ] = tr.xpath('td[5]/text()' ).extract_first()
                item['agent_type' ] = tr.xpath('td[6]/text()' ).extract_first()
                item['survival_time' ] = tr.xpath('td[7]/text()' ).extract_first()
                item['verify_time' ] = tr.xpath('td[8]/text()' ).extract_first()
                yield item
            elif tr_class is None:
                # Rows without a class attribute hold pagination links.
                a_href = tr.xpath('th/a/@href' ).extract_first()
                url = parse.urljoin(base_url, a_href)
                yield scrapy.Request(url=url, callback=self.parse)
            # Any other class value (e.g. the header row) is ignored.
Pipeline option 1: store items in MongoDB.
import pymongo
class DailiPipeline (object) :
    """Item pipeline that stores each scraped proxy item in MongoDB
    (database ``daili``, collection ``agent``)."""

    def __init__(self):
        # NOTE(review): hard-coded remote host — consider moving host/port
        # into the project settings instead of the pipeline source.
        self.mongo_client = pymongo.MongoClient(host='47.98.173.29' , port=27017 )
        self.db = self.mongo_client.daili
        self.connection = self.db.agent

    def process_item(self, item, spider):
        """Insert the item as a plain dict and pass it on unchanged."""
        # insert_one replaces Collection.insert, which is deprecated/removed
        # in pymongo 3.x/4.x.
        self.connection.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        # Release the MongoDB connection when the spider finishes; the
        # original leaked the client.
        self.mongo_client.close()
Pipeline option 2: store items in a local .txt file.
import time
import os
class DailiPipeline (object) :
    """Item pipeline that appends each proxy item to a date-stamped local
    ``Daili<YYYYMMDD>.txt`` file and downloads the country-flag image once
    per distinct image filename."""

    def process_item(self, item, spider):
        """Append one space-separated line per item; return the item."""
        # BUG FIX: `request` was used but never imported in this module;
        # import it locally where it is needed.
        from urllib import request

        now = time.strftime('%Y%m%d' , time.localtime())
        filename = 'Daili' + now + '.txt'
        with open(filename, 'a' , encoding='utf-8' ) as fp:
            imgname = os.path.basename(item['country' ])
            fp.write(imgname + ' ' )
            # Download the flag image only if it is not already on disk.
            if not os.path.exists(imgname):
                # BUG FIX: the original reused `fp` for the image file,
                # shadowing the text-file handle — every later write went to
                # a closed binary file.  Use a distinct handle instead.
                with open(imgname, 'wb' ) as img_fp:
                    response = request.urlopen(item['country' ])
                    img_fp.write(response.read())
            fp.write(item['agent_ip' ] + ' ' )
            fp.write(item['agent_port' ] + ' ' )
            fp.write(item['agent_addr' ] + ' ' )
            fp.write(item['anonymity' ] + ' ' )
            fp.write(item['agent_type' ] + ' ' )
            fp.write(item['survival_time' ] + ' ' )
            fp.write(item['verify_time' ] + '\n\n' )
        # Removed the original time.sleep(1): blocking the pipeline stalls
        # the whole crawler; use scrapy's DOWNLOAD_DELAY setting to throttle.
        return item
Pipeline option 3: store items in a local .json file.
import codecs
import json
import time
from urllib import request
class DailiPipeline (object) :
    """Item pipeline that appends every scraped item to a date-stamped
    local ``Daili<YYYYMMDD>.json`` file, one JSON object per line
    (JSON Lines format, non-ASCII characters kept verbatim)."""

    def process_item(self, item, spider):
        """Serialize the item to one JSON line and pass it on unchanged."""
        stamp = time.strftime('%Y%m%d' , time.localtime())
        out_path = 'Daili' + stamp + '.json'
        record = json.dumps(dict(item), ensure_ascii=False )
        with codecs.open(out_path, 'a' , encoding='utf-8' ) as out:
            out.write(record + '\n' )
        return item