What does nonlocal mean in Python: understanding and using nonlocal

nonlocal declares that a name refers to a variable in an enclosing (non-global) function scope instead of creating a new local one. I came across it while reading the source of Python's lru_cache.

def decorator(func):
    a = 1

    def wrapper(*args, **kwargs):
        nonlocal a
        a += 1
        return func(*args, **kwargs)

    return wrapper

In this example, the wrapper function references a, so after the decorator has returned, a survives as a free variable in the wrapper's closure. Because a is an immutable type and a += 1 rebinds the name, a must be declared nonlocal inside the wrapper; without the declaration Python would treat a as a new local variable and raise UnboundLocalError.

If a were a mutable type (say, a list) and the wrapper only mutated it in place instead of rebinding the name, no nonlocal declaration would be needed.

Trying this out locally yourself will deepen the understanding; a small sketch of both cases follows.
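A minimal sketch contrasting the two cases (counting_decorator, count and calls are illustrative names, not part of the original example):

def counting_decorator(func):
    count = 0    # immutable int captured by the closure
    calls = []   # mutable list captured by the closure

    def wrapper(*args, **kwargs):
        nonlocal count                # rebinding the int requires nonlocal,
        count += 1                    # otherwise UnboundLocalError is raised
        calls.append(args)            # in-place mutation needs no declaration
        return func(*args, **kwargs)

    return wrapper

@counting_decorator
def add(x, y):
    return x + y

add(1, 2)
add(3, 4)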

In the lru_cache source, nonlocal is used to update the hit and miss counters.

Only the wrapper-building function is shown here.

def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get
    cache_len = cache.__len__
    lock = RLock()
    root = []
    root[:] = [root, root, None, None]

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just update the statistics
            nonlocal misses   # misses is rebound here, so it must be declared nonlocal
            misses += 1
            result = user_function(*args, **kwds)
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            misses += 1
            result = user_function(*args, **kwds)
            cache[key] = result
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
                misses += 1
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper
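As a quick check that the hits and misses counters behave as described, here is a small usage example against the public functools.lru_cache interface (the printed values assume a cold cache):

from functools import lru_cache

@lru_cache(maxsize=2)
def square(x):
    return x * x

square(2)                     # miss
square(2)                     # hit
square(3)                     # miss
print(square.cache_info())    # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)

square.cache_clear()          # resets hits, misses and full via nonlocal
print(square.cache_info())    # CacheInfo(hits=0, misses=0, maxsize=2, currsize=0)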
