How Objective-C's objc_msgSend Works: Message Dispatch Under the Hood (objc4-781)

This article digs into Objective-C's message dispatch flow: the receiver nil check, the cache probe, method list traversal, and dynamic resolution. It walks through the assembly implementation of objc_msgSend, including the nil/tagged-pointer check, the cache lookup algorithm, and the search up the class hierarchy for a method.

The overall flow:

1. Check whether the message receiver is nil; if it is, return immediately.

2. If the receiver is an instance object, follow its isa pointer to the class object and probe that class's method cache (a hash table), an O(1) lookup.

3. If the receiver is a class object, follow its isa pointer to the metaclass object and search there instead.

4. On a cache miss, fall back to the method lists stored in class_rw_t.

5. A sorted method list is binary-searched in O(log N); an unsorted one is scanned linearly in O(N).

6. If the method is found, call it, stop searching, and insert the result into the method cache.

7. If it is not found and a superclass exists, repeat the whole process up the superclass chain. If even the root class NSObject has no implementation, dispatch enters the dynamic resolution flow. (A simplified model of this walk follows below.)
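To make the flow concrete, here is a deliberately simplified C model of the walk just described. Everything in it (cls_sim, method_sim, the flat arrays) is hypothetical scaffolding; the real runtime uses a hash-table cache (cache_t) and sorted method lists inside class_rw_t:

#include <stddef.h>
#include <string.h>

typedef void (*IMP_sim)(void);
typedef struct { const char *sel; IMP_sim imp; } method_sim;
typedef struct cls_sim {
    struct cls_sim *superclass;
    method_sim cache[8]; int cacheCount;   // stands in for cache_t
    method_sim *methods; int methodCount;  // stands in for class_rw_t's lists
} cls_sim;

static IMP_sim scan(const method_sim *list, int n, const char *sel) {
    for (int i = 0; i < n; i++)            // real sorted lists use binary search
        if (strcmp(list[i].sel, sel) == 0) return list[i].imp;
    return NULL;
}

// Mirrors the prose: cache first, then method lists, then the superclass chain.
static IMP_sim lookup_sim(cls_sim *cls, const char *sel) {
    for (cls_sim *cur = cls; cur; cur = cur->superclass) {
        IMP_sim imp = scan(cur->cache, cur->cacheCount, sel);       // fast path
        if (!imp) imp = scan(cur->methods, cur->methodCount, sel);  // slow path
        if (imp) {
            if (cls->cacheCount < 8)       // fill the receiver's class cache
                cls->cache[cls->cacheCount++] = (method_sim){sel, imp};
            return imp;
        }
    }
    return NULL;  // the real runtime now tries resolution, then forwarding
}

Note that the cache being filled belongs to the receiver's class even when the implementation is found in a superclass; the real runtime does the same in log_and_fill_cache below.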

Latest source: https://opensource.apple.com/tarballs/objc4/objc4-781.tar.gz

_objc_msgSend is called extremely often, so for efficiency it is written in assembly. (I took assembly in college but have mostly forgotten it.)

ENTRY _objc_msgSend
UNWIND _objc_msgSend, NoFrame
// p0 register: the message receiver
cmp    p0, #0            // nil check and tagged pointer check
#if SUPPORT_TAGGED_POINTERS
b.le    LNilOrTagged        //  (MSB tagged pointer looks negative)
#else
b.eq    LReturnZero
#endif
ldr    p13, [x0]        // p13 = isa
GetClassFromIsa_p16 p13        // p16 = class
LGetIsaDone:
// calls imp or objc_msgSend_uncached
// probe the method cache
CacheLookup NORMAL, _objc_msgSend

#if SUPPORT_TAGGED_POINTERS
LNilOrTagged:
b.eq    LReturnZero        // nil check

// tagged
adrp    x10, _objc_debug_taggedpointer_classes@PAGE
add    x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
ubfx    x11, x0, #60, #4
ldr    x16, [x10, x11, LSL #3]
adrp    x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE
add    x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF
cmp    x10, x16
b.ne    LGetIsaDone

// ext tagged
adrp    x10, _objc_debug_taggedpointer_ext_classes@PAGE
add    x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF
ubfx    x11, x0, #52, #8
ldr    x16, [x10, x11, LSL #3]
b    LGetIsaDone
// SUPPORT_TAGGED_POINTERS
#endif

LReturnZero:
// x0 is already zero
mov    x1, #0
movi    d0, #0
movi    d1, #0
movi    d2, #0
movi    d3, #0
ret

END_ENTRY _objc_msgSend
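
The LReturnZero path is why messaging nil is safe and simply returns zero: x0 (and x1) stay zero for integer and small-struct returns, and d0-d3 are explicitly zeroed for floating-point returns. A quick illustration of the observable behavior:

#import <Foundation/Foundation.h>

int main(void) {
    @autoreleasepool {
        NSString *s = nil;
        NSUInteger len = [s length];          // 0,      x0 is already zero
        double d = [s doubleValue];           // 0.0,    movi d0, #0
        NSRange r = [s rangeOfString:@"x"];   // {0, 0}, x0/x1 both zero
        NSLog(@"%lu %g %lu %lu", (unsigned long)len, d,
              (unsigned long)r.location, (unsigned long)r.length);
    }
    return 0;
}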

CacheLookup

.macro CacheLookup
//
// Restart protocol:
//
//   As soon as we're past the LLookupStart$1 label we may have loaded
//   an invalid cache pointer or mask.
//
//   When task_restartable_ranges_synchronize() is called,
//   (or when a signal hits us) before we're past LLookupEnd$1,
//   then our PC will be reset to LLookupRecover$1 which forcefully
//   jumps to the cache-miss codepath which have the following
//   requirements:
//
//   GETIMP:
//     The cache-miss is just returning NULL (setting x0 to 0)
//
//   NORMAL and LOOKUP:
//   - x0 contains the receiver
//   - x1 contains the selector
//   - x16 contains the isa
//   - other registers are set as per calling conventions
//
LLookupStart$1:

    // p1 = SEL, p16 = isa
    ldr    p11, [x16, #CACHE]                // p11 = mask|buckets

#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    and    p10, p11, #0x0000ffffffffffff    // p10 = buckets
    and    p12, p1, p11, LSR #48        // x12 = _cmd & mask
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    and    p10, p11, #~0xf            // p10 = buckets
    and    p11, p11, #0xf            // p11 = maskShift
    mov    p12, #0xffff
    lsr    p11, p12, p11                // p11 = mask = 0xffff >> p11
    and    p12, p1, p11                // x12 = _cmd & mask
#else
#error Unsupported cache mask storage for ARM64.
#endif


    add    p12, p10, p12, LSL #(1+PTRSHIFT)
                     // p12 = buckets + ((_cmd & mask) << (1+PTRSHIFT))

    ldp    p17, p9, [x12]        // {imp, sel} = *bucket
1:    cmp    p9, p1            // if (bucket->sel != _cmd)
    b.ne    2f            //     scan more
    CacheHit $0            // call or return imp
2:    // not hit: p12 = not-hit bucket
    CheckMiss $0            // miss if bucket->sel == 0
    cmp    p12, p10        // wrap if bucket == buckets
    b.eq    3f
    ldp    p17, p9, [x12, #-BUCKET_SIZE]!    // {imp, sel} = *--bucket
    b    1b            // loop

3:    // wrap: p12 = first bucket, w11 = mask
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    add    p12, p12, p11, LSR #(48 - (1+PTRSHIFT))
                    // p12 = buckets + (mask << 1+PTRSHIFT)
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    add    p12, p12, p11, LSL #(1+PTRSHIFT)
                    // p12 = buckets + (mask << 1+PTRSHIFT)
#else
#error Unsupported cache mask storage for ARM64.
#endif

    // Clone scanning loop to miss instead of hang when cache is corrupt.
    // The slow path may detect any corruption and halt later.

    ldp    p17, p9, [x12]        // {imp, sel} = *bucket
1:    cmp    p9, p1            // if (bucket->sel != _cmd)
    b.ne    2f            //     scan more
    CacheHit $0            // call or return imp
    
2:    // not hit: p12 = not-hit bucket
    CheckMiss $0            // miss if bucket->sel == 0
    cmp    p12, p10        // wrap if bucket == buckets
    b.eq    3f
    ldp    p17, p9, [x12, #-BUCKET_SIZE]!    // {imp, sel} = *--bucket
    b    1b            // loop

LLookupEnd$1:
LLookupRecover$1:
3:    // double wrap
    JumpMiss $0

.endmacro
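
Stripped of the two mask-storage layouts and the restart protocol, the probe order the macro implements is: hash the selector with the mask, then walk backward through the buckets, wrapping once from the first bucket to the last. A rough C rendering (bucket_sim is a hypothetical stand-in; the real bucket_t stores the imp in encoded form):

#include <stdint.h>

typedef struct { uintptr_t sel; uintptr_t imp; } bucket_sim;

uintptr_t cache_probe_sim(const bucket_sim *buckets, uintptr_t mask, uintptr_t sel) {
    uintptr_t begin = sel & mask;             // x12 = _cmd & mask
    uintptr_t i = begin;
    do {
        if (buckets[i].sel == sel) return buckets[i].imp;  // CacheHit
        if (buckets[i].sel == 0)   return 0;               // CheckMiss
        i = (i == 0) ? mask : i - 1;          // {imp, sel} = *--bucket, then wrap
    } while (i != begin);
    return 0;                                 // double wrap, i.e. JumpMiss
}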

CheckMiss (the selector was not found in the cache)

.macro CheckMiss
	// miss if bucket->sel == 0
.if $0 == GETIMP
	cbz	p9, LGetImpMiss
.elseif $0 == NORMAL
	cbz	p9, __objc_msgSend_uncached
.elseif $0 == LOOKUP
	cbz	p9, __objc_msgLookup_uncached
.else
.abort oops
.endif
.endmacro

__objc_msgSend_uncached

STATIC_ENTRY __objc_msgSend_uncached
UNWIND __objc_msgSend_uncached, FrameWithNoSaves

// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band p16 is the class to search

MethodTableLookup
TailCallFunctionPointer x17

END_ENTRY __objc_msgSend_uncached


STATIC_ENTRY __objc_msgLookup_uncached
UNWIND __objc_msgLookup_uncached, FrameWithNoSaves

// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band p16 is the class to search

MethodTableLookup
ret

END_ENTRY __objc_msgLookup_uncached

MethodTableLookup

.macro MethodTableLookup
    
// push frame
SignLR
stp    fp, lr, [sp, #-16]!
mov    fp, sp

// save parameter registers: x0..x8, q0..q7
sub    sp, sp, #(10*8 + 8*16)
stp    q0, q1, [sp, #(0*16)]
stp    q2, q3, [sp, #(2*16)]
stp    q4, q5, [sp, #(4*16)]
stp    q6, q7, [sp, #(6*16)]
stp    x0, x1, [sp, #(8*16+0*8)]
stp    x2, x3, [sp, #(8*16+2*8)]
stp    x4, x5, [sp, #(8*16+4*8)]
stp    x6, x7, [sp, #(8*16+6*8)]
str    x8,     [sp, #(8*16+8*8)]

// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
// receiver and selector already in x0 and x1
mov    x2, x16
mov    x3, #3
bl    _lookUpImpOrForward // call the C function lookUpImpOrForward

// IMP in x0
mov    x17, x0

// restore registers and return
ldp    q0, q1, [sp, #(0*16)]
ldp    q2, q3, [sp, #(2*16)]
ldp    q4, q5, [sp, #(4*16)]
ldp    q6, q7, [sp, #(6*16)]
ldp    x0, x1, [sp, #(8*16+0*8)]
ldp    x2, x3, [sp, #(8*16+2*8)]
ldp    x4, x5, [sp, #(8*16+4*8)]
ldp    x6, x7, [sp, #(8*16+6*8)]
ldr    x8,     [sp, #(8*16+8*8)]

mov    sp, fp
ldp    fp, lr, [sp], #16
AuthenticateLR

.endmacro

lookUpImpOrForward

/***********************************************************************
* lookUpImpOrForward.
* The standard IMP lookup. 
* Without LOOKUP_INITIALIZE: tries to avoid +initialize (but sometimes fails)
* Without LOOKUP_CACHE: skips optimistic unlocked lookup (but uses cache elsewhere)
* Most callers should use LOOKUP_INITIALIZE and LOOKUP_CACHE
* inst is an instance of cls or a subclass thereof, or nil if none is known. 
*   If cls is an un-initialized metaclass then a non-nil inst is faster.
* May return _objc_msgForward_impcache. IMPs destined for external use 
*   must be converted to _objc_msgForward or _objc_msgForward_stret.
*   If you don't want forwarding at all, use LOOKUP_NIL.
**********************************************************************/
IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
    const IMP forward_imp = (IMP)_objc_msgForward_impcache;
    IMP imp = nil;
    Class curClass;

    runtimeLock.assertUnlocked();

    // Optimistic cache lookup
    if (fastpath(behavior & LOOKUP_CACHE)) {
        imp = cache_getImp(cls, sel);
        if (imp) goto done_nolock;
    }

    // runtimeLock is held during isRealized and isInitialized checking
    // to prevent races against concurrent realization.

    // runtimeLock is held during method search to make
    // method-lookup + cache-fill atomic with respect to method addition.
    // Otherwise, a category could be added but ignored indefinitely because
    // the cache was re-filled with the old value after the cache flush on
    // behalf of the category.

    runtimeLock.lock();

    // We don't want people to be able to craft a binary blob that looks like
    // a class but really isn't one and do a CFI attack.
    //
    // To make these harder we want to make sure this is a class that was
    // either built into the binary or legitimately registered through
    // objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair.
    //
    // TODO: this check is quite costly during process startup.
    checkIsKnownClass(cls);

    if (slowpath(!cls->isRealized())) {
        cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
        // runtimeLock may have been dropped but is now locked again
    }

    if (slowpath((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized())) {
        cls = initializeAndLeaveLocked(cls, inst, runtimeLock);
        // runtimeLock may have been dropped but is now locked again

        // If sel == initialize, class_initialize will send +initialize and 
        // then the messenger will send +initialize again after this 
        // procedure finishes. Of course, if this is not being called 
        // from the messenger then it won't happen. 2778172
    }

    runtimeLock.assertLocked();
    curClass = cls;

    // The code used to look up the class's cache again right after
    // we take the lock but for the vast majority of the cases
    // evidence shows this is a miss most of the time, hence a time loss.
    //
    // The only codepath calling into this without having performed some
    // kind of cache lookup is class_getInstanceMethod().

    for (unsigned attempts = unreasonableClassCount();;) {
        // curClass method list.
        Method meth = getMethodNoSuper_nolock(curClass, sel);
        if (meth) {
            imp = meth->imp;
            goto done;
        }

        if (slowpath((curClass = curClass->superclass) == nil)) {
            // No implementation found, and method resolver didn't help.
            // Use forwarding.
            imp = forward_imp;
            break;
        }

        // Halt if there is a cycle in the superclass chain.
        if (slowpath(--attempts == 0)) {
            _objc_fatal("Memory corruption in class list.");
        }

        // Superclass cache.
        imp = cache_getImp(curClass, sel);
        if (slowpath(imp == forward_imp)) {
            // Found a forward:: entry in a superclass.
            // Stop searching, but don't cache yet; call method
            // resolver for this class first.
            break;
        }
        if (fastpath(imp)) {
            // Found the method in a superclass. Cache it in this class.
            goto done;
        }
    }

    // No implementation found. Try method resolver once.

    if (slowpath(behavior & LOOKUP_RESOLVER)) {
        behavior ^= LOOKUP_RESOLVER;
        return resolveMethod_locked(inst, sel, cls, behavior);
    }

 done: // fill the method cache
    log_and_fill_cache(cls, imp, sel, inst, curClass);
    runtimeLock.unlock();
 done_nolock:
    if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) {
        return nil;
    }
    return imp;
}
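
When the loop exhausts the superclass chain without finding an implementation, resolveMethod_locked gives the class a single chance at dynamic resolution before the forwarding IMP is cached. At the Objective-C level that hook is +resolveInstanceMethod: (or +resolveClassMethod: for class methods); a minimal example of the mechanism:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

@interface Greeter : NSObject
- (void)greet;                      // declared but never implemented
@end

static void greetIMP(id self, SEL _cmd) { NSLog(@"resolved at runtime"); }

@implementation Greeter
+ (BOOL)resolveInstanceMethod:(SEL)sel {
    if (sel == @selector(greet)) {
        class_addMethod(self, sel, (IMP)greetIMP, "v@:");
        return YES;                 // the lookup is retried and now succeeds
    }
    return [super resolveInstanceMethod:sel];
}
@end

After the resolver returns YES, the lookup runs again, finds the freshly added method, and caches it like any other hit, so [[Greeter new] greet] logs instead of entering forwarding.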

getMethodNoSuper_nolock

/***********************************************************************
 * getMethodNoSuper_nolock
 * fixme
 * Locking: runtimeLock must be read- or write-locked by the caller
 **********************************************************************/
static method_t *
getMethodNoSuper_nolock(Class cls, SEL sel)
{
    runtimeLock.assertLocked();

    ASSERT(cls->isRealized());
    // fixme nil cls? 
    // fixme nil sel?

    auto const methods = cls->data()->methods();
    for (auto mlists = methods.beginLists(),
              end = methods.endLists();
         mlists != end;
         ++mlists)
    {
        // <rdar://problem/46904873> getMethodNoSuper_nolock is the hottest
        // caller of search_method_list, inlining it turns
        // getMethodNoSuper_nolock into a frame-less function and eliminates
        // any store from this codepath.
        method_t *m = search_method_list_inline(*mlists, sel);
        if (m) return m;
    }

    return nil;
}

search_method_list_inline

ALWAYS_INLINE static method_t *
search_method_list_inline(const method_list_t *mlist, SEL sel)
{
    int methodListIsFixedUp = mlist->isFixedUp();
    int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t);
    
    if (fastpath(methodListIsFixedUp && methodListHasExpectedSize)) {
        return findMethodInSortedMethodList(sel, mlist);
    } else {
        // Linear search of unsorted method list
        for (auto& meth : *mlist) {
            if (meth.name == sel) return &meth;
        }
    }

#if DEBUG
    // sanity-check negative results
    if (mlist->isFixedUp()) {
        for (auto& meth : *mlist) {
            if (meth.name == sel) {
                _objc_fatal("linear search worked when binary search did not");
            }
        }
    }
#endif

    return nil;
}

findMethodInSortedMethodList

/***********************************************************************
 * findMethodInSortedMethodList
 **********************************************************************/
ALWAYS_INLINE static method_t *
findMethodInSortedMethodList(SEL key, const method_list_t *list)
{
    ASSERT(list);

    const method_t * const first = &list->first;
    const method_t *base = first;
    const method_t *probe;
    uintptr_t keyValue = (uintptr_t)key;
    uint32_t count;
    // binary search
    for (count = list->count; count != 0; count >>= 1) {
        probe = base + (count >> 1);
        
        uintptr_t probeValue = (uintptr_t)probe->name;
        
        if (keyValue == probeValue) {
            // `probe` is a match.
            // Rewind looking for the *first* occurrence of this value.
            // This is required for correct category overrides.
            while (probe > first && keyValue == (uintptr_t)probe[-1].name) {
                probe--;
            }
            return (method_t *)probe;
        }
        
        if (keyValue > probeValue) {
            base = probe + 1;
            count--;
        }
    }
    
    return nil;
}
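
The rewind loop in the match case is worth a second look: after categories are attached, a method list can contain several adjacent entries with the same selector, and the first one must win for category overrides to behave correctly. A small self-contained demo of the same search-then-rewind logic (entry_sim and the sample data are hypothetical):

#include <stdio.h>
#include <stdint.h>

typedef struct { uintptr_t name; const char *owner; } entry_sim;

static const entry_sim *find_first(const entry_sim *list, uint32_t count, uintptr_t key) {
    const entry_sim *base = list;
    const entry_sim *probe;
    for (; count != 0; count >>= 1) {
        probe = base + (count >> 1);
        if (key == probe->name) {
            // Rewind to the first entry with this name, as the runtime does.
            while (probe > list && key == probe[-1].name) probe--;
            return probe;
        }
        if (key > probe->name) { base = probe + 1; count--; }
    }
    return NULL;
}

int main(void) {
    // Two entries share name 2, as after a category override; index 1 must win.
    entry_sim list[] = { {1, "A"}, {2, "category"}, {2, "class"}, {3, "B"} };
    printf("%s\n", find_first(list, 4, 2)->owner);  // prints "category"
    return 0;
}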

log_and_fill_cache (fills the method cache)

/***********************************************************************
* log_and_fill_cache
* Log this method call. If the logger permits it, fill the method cache.
* cls is the class whose cache should be filled. 
* implementer is the class that owns the implementation in question.
**********************************************************************/
static void
log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer)
{
#if SUPPORT_MESSAGE_LOGGING
    if (slowpath(objcMsgLogEnabled && implementer)) {
        bool cacheIt = logMessageSend(implementer->isMetaClass(), 
                                      cls->nameForLogging(),
                                      implementer->nameForLogging(), 
                                      sel);
        if (!cacheIt) return;
    }
#endif
    cache_fill(cls, sel, imp, receiver);
}

void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
    runtimeLock.assertLocked();

#if !DEBUG_TASK_THREADS
    // Never cache before +initialize is done
    if (cls->isInitialized()) {
        cache_t *cache = getCache(cls);
#if CONFIG_USE_CACHE_LOCK
        mutex_locker_t lock(cacheUpdateLock);
#endif
        cache->insert(cls, sel, imp, receiver);
    }
#else
    _collecting_in_critical();
#endif
}

cache_t::insert

ALWAYS_INLINE
void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    ASSERT(sel != 0 && cls->isInitialized());

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = occupied() + 1;
    unsigned oldCapacity = capacity(), capacity = oldCapacity;
    if (slowpath(isConstantEmptyCache())) {
        // Cache is read-only. Replace it.
        if (!capacity) capacity = INIT_CACHE_SIZE;
        reallocate(oldCapacity, capacity, /* freeOld */false);
    }
    else if (fastpath(newOccupied + CACHE_END_MARKER <= capacity / 4 * 3)) {
        // Cache is less than 3/4 full. Use it as-is.
    }
    else {
        // over 3/4 of capacity: grow to twice the size
        capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
        if (capacity > MAX_CACHE_SIZE) {
            capacity = MAX_CACHE_SIZE;
        }
        reallocate(oldCapacity, capacity, true);
    }

    bucket_t *b = buckets();
    mask_t m = capacity - 1;
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    do {
        if (fastpath(b[i].sel() == 0)) {
            incrementOccupied();
            b[i].set<Atomic, Encoded>(sel, imp, cls);
            return;
        }
        if (b[i].sel() == sel) {
            // The entry was added to the cache by some other thread
            // before we grabbed the cacheUpdateLock.
            return;
        }
    } while (fastpath((i = cache_next(i, m)) != begin));

    cache_t::bad_cache(receiver, (SEL)sel, cls);
}
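
The growth policy above boils down to: keep using the table while it stays under 3/4 occupancy, otherwise double the capacity, capped at MAX_CACHE_SIZE. A rough sketch of just that rule (the *_SIM constants are stand-ins, and the real check also accounts for CACHE_END_MARKER):

enum { INIT_CACHE_SIZE_SIM = 4, MAX_CACHE_SIZE_SIM = 1 << 16 };

static unsigned next_capacity(unsigned occupied, unsigned capacity) {
    if (capacity == 0) return INIT_CACHE_SIZE_SIM;          // constant empty cache
    if (occupied + 1 <= capacity / 4 * 3) return capacity;  // under 3/4: keep as-is
    unsigned grown = capacity * 2;                          // otherwise double
    return grown > MAX_CACHE_SIZE_SIM ? MAX_CACHE_SIZE_SIM : grown;
}

Note that reallocate(oldCapacity, capacity, true) allocates fresh empty buckets and frees the old ones without rehashing, so after a grow the cache effectively restarts with only the entry about to be inserted.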

 
