atomic operation and spinlock

spinlock的实现建立在atomic operations之上。atomic operation最终是通过ldrex/strex指令实现的。

Atomic Operations on Integers

include/linux/types.h
/*
 * Atomic integer type: wraps a plain int so that the value can only be
 * manipulated through the atomic_* accessor functions, never directly.
 */
typedef struct {
    int counter;  /* the wrapped value; read/modified only via atomic ops */
} atomic_t;

arch/arm/include/asm/atomic.h

ARM Cortex-A9:    architecture: v7-A
#if __LINUX_ARM_ARCH__ >= 6
/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

/*
 * atomic_add - atomically add @i to @v->counter.
 *
 * Implemented as an LDREX/STREX retry loop:
 *   - LDREX loads v->counter into %0 (result) and arms the exclusive monitor;
 *   - ADD computes the new value;
 *   - STREX attempts the store, writing 0 into %1 (tmp) on success or 1 if
 *     the exclusive monitor was lost (another CPU touched the location);
 *   - TEQ/BNE loop back to label 1 until the store succeeds.
 *
 * Constraints: "+Qo" tells the compiler v->counter is read and written in
 * memory; "Ir" allows @i to be an immediate or a register; "cc" marks the
 * condition flags as clobbered by TEQ.
 */
static inline void atomic_add(int i, atomic_t *v)
{
    unsigned long tmp;
    int result;

    __asm__ __volatile__("@ atomic_add\n"
"1:    ldrex    %0, [%3]\n"
"    add    %0, %0, %4\n"
"    strex    %1, %0, [%3]\n"
"    teq    %1, #0\n"
"    bne    1b"
    : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
    : "r" (&v->counter), "Ir" (i)
    : "cc");
}

#error SMP not supported on pre-ARMv6 CPUs

LDREX and STREX


The LDREX and STREX instructions split the operation of atomically updating memory into two separate steps.
Together, they provide atomic updates in conjunction with exclusive monitors that track exclusive memory accesses,
see Exclusive monitors. Load-Exclusive and Store-Exclusive must only access memory regions marked as Normal.

LDREX
The LDREX instruction loads a word from memory, initializing the state of the exclusive monitor(s)
to track the synchronization operation.
For example, LDREX R1, [R0] performs a Load-Exclusive from the address in R0,
places the value into R1 and updates the exclusive monitor(s).

STREX
The STREX instruction performs a conditional store of a word to memory.
If the exclusive monitor(s) permit the store, the operation updates the memory location and
returns the value 0 in the destination register, indicating that the operation succeeded.
If the exclusive monitor(s) do not permit the store, the operation does not update the memory location
and returns the value 1 in the destination register.
This makes it possible to implement conditional execution paths based on the success or failure of the memory operation.
For example, STREX R2, R1, [R0] performs a Store-Exclusive operation to the address in R0,
conditionally storing the value from R1 and indicating success or failure in R2.
Alternative exclusive access sizes


The ARMv7 architecture added these to the Thumb instruction set in the A and R profiles. ARMv7-M supports the byte and halfword but not the doubleword variants. ARMv6-M does not support exclusive accesses.

The architecture requires that each Load-Exclusive instruction must be used only with the corresponding Store-Exclusive instruction, for example LDREXB must only be used with STREXB.

spin lock

include/linux/spinlock.h
crash> spinlock
struct spinlock {
    union {
        struct raw_spinlock rlock;
    };
}
SIZE: 20
crash> raw_spinlock
struct raw_spinlock {
    arch_spinlock_t raw_lock;
    unsigned int break_lock;
    unsigned int magic;
    unsigned int owner_cpu;
    void *owner;
}
SIZE: 20
crash> arch_spinlock_t
typedef struct {
    volatile unsigned int lock;
} arch_spinlock_t;
SIZE: 4

/*
 * spin_lock_init - run-time initialization of a spinlock.
 * spinlock_check() exists only to type-check that @_lock is really a
 * spinlock pointer; the embedded raw_spinlock is then set to the
 * unlocked state via raw_spin_lock_init().
 */
#define spin_lock_init(_lock)                \
do {                            \
    spinlock_check(_lock);                \
    raw_spin_lock_init(&(_lock)->rlock);        \
} while (0)

/* Reset a raw_spinlock to the unlocked state by whole-struct assignment. */
# define raw_spin_lock_init(lock)                \
    do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)

/* Compound literal: an unlocked raw_spinlock_t usable as an rvalue. */
#define __RAW_SPIN_LOCK_UNLOCKED(lockname)    \
    (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)

/*
 * Static initializer for a raw_spinlock_t: arch lock in the unlocked
 * state, plus the optional debug fields.  SPIN_DEBUG_INIT expands to the
 * magic/owner fields under CONFIG_DEBUG_SPINLOCK, and SPIN_DEP_MAP_INIT
 * to the lockdep map under CONFIG_DEBUG_LOCK_ALLOC; both are empty
 * otherwise.
 *
 * Note: the comment must precede the line-continuation backslash — the
 * original text had it after the backslash, which breaks the macro.
 */
#define __RAW_SPIN_LOCK_INITIALIZER(lockname)    \
    {                    \
    .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,    \
    SPIN_DEBUG_INIT(lockname)  /* CONFIG_DEBUG_SPINLOCK & CONFIG_DEBUG_LOCK_ALLOC */ \
    SPIN_DEP_MAP_INIT(lockname) }

/*
 * _raw_spin_lock - out-of-line entry point for spin_lock(); simply
 * forwards to the inline __raw_spin_lock() implementation.
 */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
    __raw_spin_lock(lock);
}

/*
 * __raw_spin_lock - generic lock slow path.
 * Preemption is disabled first (a spinlock holder must not be scheduled
 * out), lockdep is notified via spin_acquire(), and the arch-level lock
 * is finally taken through LOCK_CONTENDED.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
    preempt_disable();
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    LOCK_CONTENDED(lock, do_raw_spin_lock, do_raw_spin_lock);
}
/*
 * Without CONFIG_LOCK_STAT, LOCK_CONTENDED ignores the trylock path and
 * just calls the unconditional lock function.
 */
#define LOCK_CONTENDED(_lock, try, lock) \
    lock(_lock)

/*
 * do_raw_spin_lock - hand off to the architecture spinlock.
 * __acquire()/__acquires() are sparse annotations for static lock
 * balance checking; they generate no code.
 */
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
    __acquire(lock);
    arch_spin_lock(&lock->raw_lock);
}

/*
 * arch_spin_lock - ARM spinlock acquire (pre-ticket-lock variant).
 *
 * Loop:
 *   - LDREX loads lock->lock and arms the exclusive monitor;
 *   - TEQ tests for 0 (unlocked); if non-zero, WFE("ne") puts the CPU
 *     into a low-power wait-for-event state until the holder signals;
 *   - STREXEQ conditionally stores 1 (the %2 operand) when the lock was
 *     free, leaving 0 in %0 on success;
 *   - TEQEQ/BNE retry from label 1 if the lock was held or the
 *     exclusive store failed.
 *
 * smp_mb() after acquisition orders the critical section's memory
 * accesses after the lock acquire on SMP.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned long tmp;

    __asm__ __volatile__(
"1:    ldrex    %0, [%1]\n"
"    teq    %0, #0\n"
    WFE("ne")
"    strexeq    %0, %2, [%1]\n"
"    teqeq    %0, #0\n"
"    bne    1b"
    : "=&r" (tmp)
    : "r" (&lock->lock), "r" (1)
    : "cc");

    smp_mb();
}


/*
 * spin_lock_irqsave - take the lock and disable local interrupts,
 * saving the previous interrupt state into @flags.
 * flags must be passed by name (not address) because these are macros.
 */
#define spin_lock_irqsave(lock, flags)                \
do {                                \
    raw_spin_lock_irqsave(spinlock_check(lock), flags);    \
} while (0)

/* typecheck() rejects callers that pass flags with the wrong type. */
#define raw_spin_lock_irqsave(lock, flags)        \
    do {                        \
        typecheck(unsigned long, flags);    \
        _raw_spin_lock_irqsave(lock, flags);    \
    } while (0)

/*
 * __raw_spin_lock_irqsave - irq-safe lock slow path.
 * Order matters: interrupts are disabled (state saved in flags) before
 * preemption is disabled and the lock is taken, so an interrupt handler
 * on this CPU cannot deadlock by re-acquiring the same lock.
 * Returns the saved interrupt state for the matching unlock_irqrestore.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)

{
    unsigned long flags;

    local_irq_save(flags);
    preempt_disable();
    spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);

    do_raw_spin_lock_flags(lock, &flags);

    return flags;
}

Exclusive monitors

http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dht0008a/CJAGCFAF.html


An exclusive monitor is a simple state machine, with the possible states open and exclusive. To support synchronization between processors, a system must implement two sets of monitors, local and global. A Load-Exclusive operation updates the monitors to the exclusive state. A Store-Exclusive operation accesses the monitor(s) to determine whether it can complete successfully. A Store-Exclusive can succeed only if all accessed exclusive monitors are in the exclusive state.



评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值