While crawling, if I don't call pymongo's close method, the memory of python xxx.py creeps up bit by bit: it starts at a little over 900 MB, slowly passes 1 GB, and gets close to 2 GB. But it runs fast; when I tail -f log, the output scrolls by too quickly to read.
If I do call close, memory holds steady at around 500 MB, but reading from and writing to the database become slow; when I tail the log I can see which URL is being saved and which URL is being fetched.
How should I solve this?
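My understanding is that MongoClient keeps its own connection pool and is meant to be shared, so maybe the fix is to stop calling close() per operation and cap the pool size instead? A minimal sketch of what I mean, assuming pymongo 3.x (I already use find_one_and_delete, which is a 3.x method; in 2.x the keyword was max_pool_size). The host/port and maxPoolSize=10 are just example values:

import pymongo

# One client for the whole process: MongoClient is thread-safe and pools
# connections internally, so a per-call close() should not be necessary.
# 127.0.0.1:27017 and maxPoolSize=10 are placeholder values, not recommendations.
client = pymongo.MongoClient(host='127.0.0.1', port=27017, maxPoolSize=10)
db_data = client['xxx_data']

def insert_db(item):
    # No close() here; the socket goes back to the pool after the call.
    db_data.xxx_data.update({"car_id": item['car_id']}, item, True)

For reference, my current database module is below.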
# -*- coding: utf-8 -*-
import logging
import time, datetime

import pymongo

import setting
from setting import mongo_host, mongo_port, mongo_db_name_data, mongo_db_name_linkbase, mongo_db_name_task

logging.basicConfig(filename='log', level=logging.INFO)


class Connect_mongo(object):
    def __init__(self):
        self.mongo_host = mongo_host
        self.mongo_port = mongo_port
        self.conn()

    def conn(self):
        self.client = pymongo.MongoClient(host=self.mongo_host, port=self.mongo_port)
        self.db_data = self.client[mongo_db_name_data]
        self.db_linkbase = self.client[mongo_db_name_linkbase]
        self.db_linkbase_collection = self.db_linkbase.linkbase
        self.db_task = self.client[mongo_db_name_task]

    def insert_db(self, item):
        setting.my_logger.info('Final item being inserted into the database: %s' % item)
        # Positional True means upsert=True (legacy update API).
        self.db_data.xxx_data.update({"car_id": item['car_id']}, item, True)
        self.client.close()

    def save_linkbase(self, response_result, spider_name, hash_url, item_type):
        if item_type == 'carinfo_item':
            linkinfo = {}
            linkinfo['status'] = response_result.status_code
            linkinfo['url'] = response_result.url
            linkinfo['spider_name'] = spider_name
            linkinfo['hash_url'] = hash_url
            # Save to linkbase.
            self.db_linkbase_collection.update({"status": linkinfo['status'], "hash_url": hash_url}, linkinfo, True)
            self.client.close()
        else:
            # TTL index: documents expire 7200 s after their over_time value.
            self.db_linkbase_collection.create_index([("over_time", pymongo.ASCENDING)], expireAfterSeconds=7200)
            linkinfo = {}
            linkinfo['status'] = response_result.status_code
            linkinfo['url'] = response_result.url
            linkinfo['spider_name'] = spider_name
            linkinfo['hash_url'] = hash_url
            linkinfo['over_time'] = datetime.datetime.utcnow()
            # Save to linkbase.
            self.db_linkbase_collection.update({"status": linkinfo['status'], "hash_url": hash_url}, linkinfo, True)
            self.client.close()

    def save_task(self, task):
        setting.my_logger.info('Task being inserted into the database: %s' % task)
        self.db_task.xxx_task.update({'url': task['url']}, task, True)
        self.client.close()

    def get_task(self, max_requests=10):
        task = []
        for i in range(max_requests):
            result = self.db_task.xxx_task.find_one_and_delete({})
            task.append(result)
        return task

    def duplicate_removal(self, hash_data):
        result = self.db_linkbase.linkbase.find_one({'hash_url': hash_data})
        if result is None:
            return True
        else:
            return False


mongo_insert = Connect_mongo()
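One more thing I noticed while posting this: save_linkbase calls create_index on every non-carinfo save, which costs an extra round trip each time (the server just ignores it once the index exists). Presumably it only needs to run once, e.g. in conn(); a sketch of that change:

    def conn(self):
        self.client = pymongo.MongoClient(host=self.mongo_host, port=self.mongo_port)
        self.db_data = self.client[mongo_db_name_data]
        self.db_linkbase = self.client[mongo_db_name_linkbase]
        self.db_linkbase_collection = self.db_linkbase.linkbase
        self.db_task = self.client[mongo_db_name_task]
        # Create the TTL index once at startup instead of on every save.
        self.db_linkbase_collection.create_index(
            [("over_time", pymongo.ASCENDING)], expireAfterSeconds=7200)

Could that be part of the speed difference I'm seeing?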
In another .py file I crawl with requests and parse with XPath, then write to or read from the database.
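That crawler file looks roughly like this (a simplified sketch: the module name connect_mongo, the car_id/hash_url fields on the task, and the XPath are placeholders for my real ones):

# crawler.py -- simplified sketch
import requests
from lxml import etree

from connect_mongo import mongo_insert  # the module shown above

for task in mongo_insert.get_task(max_requests=10):
    if task is None:  # get_task appends None once the task queue runs dry
        continue
    resp = requests.get(task['url'], timeout=10)
    tree = etree.HTML(resp.text)
    title = tree.xpath('//title/text()')
    item = {'car_id': task.get('car_id'), 'title': title[0] if title else ''}
    mongo_insert.insert_db(item)
    mongo_insert.save_linkbase(resp, 'xxx_spider', task.get('hash_url'), 'carinfo_item')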