使用 zc.blist 确实可以带来很好的效果：在创建数据库时设置 cache_size 选项，可以控制保留在内存（RAM）中的数据量。只要经常调用 transaction.commit()，已使用的内存就不会无限增长。通过设置较大的 cache_size 并经常调用 transaction.commit()，blist 最近访问过的 bucket 会保留在 RAM 中，这样既能快速访问它们，内存占用也不会增长太多。
打包（pack）数据库的代价非常高，但如果你的硬盘足够大，就不必经常执行。
下面是一段可以自己尝试的代码。请在后台运行 top 并修改 cache_size 的值，观察它如何影响已使用的内存量。
import time
import os
import glob
from ZODB import DB
from ZODB.FileStorage import FileStorage
import transaction
from zc.blist import BList
print('STARTING')
# Source of random byte payloads used throughout the benchmark.
# NOTE(review): this binding shadows the stdlib `random` module name, and the
# file handle is never closed (left to the OS at process exit) — consider
# renaming and closing it if this grows beyond a throwaway script.
random = open('/dev/urandom', 'rb')
def test_list(my_list, loops = 1000, element_size = 100):
    """Benchmark append/insert/modify/remove/commit/pack on a list-like object.

    my_list      -- a (persistent) list-like object, e.g. a zc.blist.BList
    loops        -- number of elements exercised in each phase
    element_size -- size in bytes of each random payload

    Relies on the module-level `random` file handle (/dev/urandom), the
    `transaction` module, and the module-level `pack()` helper.
    Prints the wall-clock time of each phase.
    """
    print('testing list')

    start = time.time()
    for loop in xrange(loops):
        my_list.append(random.read(element_size))
    print('appending %s elements took %.4f seconds' % (loops, time.time() - start))

    start = time.time()
    length = len(my_list)
    print('length calculated in %.4f seconds' % (time.time() - start,))

    start = time.time()
    for loop in xrange(loops):
        # Floor division keeps the index integral: plain `/` would yield a
        # float (and fail) under Python 3; on Python 2 ints it is identical.
        my_list.insert(length // 2, random.read(element_size))
    print('inserting %s elements took %.4f seconds' % (loops, time.time() - start))

    start = time.time()
    for loop in xrange(loops):
        # Rotate each element left by one byte.  The [:1] slice (rather than
        # [0]) keeps the right-hand operand a str/bytes object — indexing a
        # bytes object on Python 3 yields an int, which cannot be
        # concatenated; on Python 2 str the result is byte-identical.
        my_list[loop] = my_list[loop][1:] + my_list[loop][:1]
    print('modifying %s elements took %.4f seconds' % (loops, time.time() - start))

    start = time.time()
    for loop in xrange(loops):
        # Deliberately O(n) per deletion — part of what is being measured.
        del my_list[0]
    print('removing %s elements took %.4f seconds' % (loops, time.time() - start))

    start = time.time()
    transaction.commit()
    print('committing all above took %.4f seconds' % (time.time() - start,))

    # Drop the elements inserted above so the database can shrink on pack.
    del my_list[:loops]
    transaction.commit()

    start = time.time()
    pack()
    print('packing after removing %s elements took %.4f seconds' % (loops, time.time() - start))
# Best-effort removal of database files left over from a previous run, so the
# benchmark always starts from an empty storage.
for stale in glob.glob('database.db*'):
    try:
        os.unlink(stale)
    except OSError:
        # File vanished or could not be removed — ignore and continue.
        pass
# Create the file-backed storage and wrap it in a DB; cache_size caps how
# many objects are kept in the in-memory object cache.
storage = FileStorage('database.db')
db = DB(storage, cache_size=2000)
def pack():
    """Pack the module-level database, discarding old object revisions to reclaim disk space."""
    db.pack()
# Populate the root with a BList of one million 100-byte random strings,
# committing after each batch of 100,000 insertions.
root = db.open().root()
root['my_list'] = BList()
blist = root['my_list']
print('inserting initial data to blist')
for batch in xrange(10):
    blist.extend(random.read(100) for _ in xrange(100000))
    transaction.commit()
transaction.commit()
test_list(blist)