- What problem does it solve?
  The root cause of the races it eliminates: a read-modify-write on shared data is not one indivisible step, so two concurrent updates can interleave and one of them is lost (sketched below). Atomic operations make the whole read-modify-write indivisible.
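A minimal sketch of that race in kernel-style C. The counter names and the two concurrent paths (process context vs. interrupt) are hypothetical, added here only for illustration; atomic_t, ATOMIC_INIT and atomic_inc are the standard kernel API.

#include <linux/atomic.h>

static int plain_hits;                          /* plain int: plain_hits++ is load + add + store */
static atomic_t atomic_hits = ATOMIC_INIT(0);   /* atomic_t: updated by one ldrex/strex retry loop */

/* Imagine this called concurrently from process context and from an interrupt handler. */
void count_hit(void)
{
        plain_hits++;                   /* both contexts may load the same old value, so one increment is lost */
        atomic_inc(&atomic_hits);       /* the exclusive store fails if the counter changed under us, and the loop retries */
}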
Implementation
- 32bit
// atomic_add is generated from the ATOMIC_OP macro below (an ldrex/strex retry loop)
arch/arm/include/asm/atomic.h
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic_" #op "\n" \
"1:     ldrex   %0, [%3]\n" \
"       " #asm_op "     %0, %0, %4\n" \
"       strex   %1, %0, [%3]\n" \
"       teq     %1, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
}
...
#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)
...
ATOMIC_OPS(add, +=, add)
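Hand-expanding ATOMIC_OP(add, +=, add), pulled in by ATOMIC_OPS(add, +=, add) above, gives roughly the following function. This is a sketch derived from the quoted macro with comments added, not a quote from the file; note the c_op argument (+=) is not used by this LL/SC variant.

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"             /* result = v->counter, and mark the address exclusive */
"       add     %0, %0, %4\n"           /* result += i */
"       strex   %1, %0, [%3]\n"         /* try to store back; tmp == 0 only if still exclusive */
"       teq     %1, #0\n"
"       bne     1b"                     /* someone else touched the counter: reload and retry */
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}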
- 64bit
arch/arm/include/asm/atomic.h
#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
 \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic64_" #op "\n" \
"1:     ldrexd  %0, %H0, [%3]\n" \
"       " #op1 " %Q0, %Q0, %Q4\n" \
"       " #op2 " %R0, %R0, %R4\n" \
"       strexd  %1, %0, %H0, [%3]\n" \
"       teq     %1, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
}
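Here the 64-bit counter is loaded and stored as a register pair with ldrexd/strexd, and the arithmetic is split into two 32-bit instructions (%Q = low half, %R = high half of the pair). For add the two ops are adds and adc (this pairing is inferred from the macro's use elsewhere in the file, not quoted above); a rough hand-expansion, with comments added, would look like:

static inline void atomic64_add(s64 i, atomic64_t *v)
{
        s64 result;
        unsigned long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%3]\n"        /* load both halves of v->counter into a register pair */
"       adds    %Q0, %Q0, %Q4\n"        /* add the low 32 bits, setting the carry flag */
"       adc     %R0, %R0, %R4\n"        /* add the high 32 bits plus carry */
"       strexd  %1, %0, %H0, [%3]\n"    /* exclusive 64-bit store; tmp == 0 on success */
"       teq     %1, #0\n"
"       bne     1b"                     /* the store lost exclusivity: retry */
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}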
- 1bit
arch/arm/include/asm/bitops.h
/*
 * These functions are the basis of our bit ops.
 *
 * First, the atomic bitops. These use native endian.
 */
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned long mask = BIT_MASK(bit);

        p += BIT_WORD(bit);

        raw_local_irq_save(flags);
        *p |= mask;
        raw_local_irq_restore(flags);
}
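Worked example: on 32-bit ARM (BITS_PER_LONG = 32), bit 40 gives BIT_WORD(40) = 1 and BIT_MASK(40) = 1UL << (40 % 32) = 0x100, so the helper sets bit 8 of the second word of the bitmap. Atomicity here comes from disabling local interrupts around the |=, which protects the read-modify-write against anything else running on the same CPU.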
...
#define ATOMIC_BITOP(name,nr,p) \
        (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
...
#define set_bit(nr,p)           ATOMIC_BITOP(set_bit,nr,p)
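So in this quoted variant a compile-time-constant bit number is dispatched to the C helper above, and anything else goes to the separate _set_bit implementation. A minimal usage sketch follows; the bitmap name, size and bit index are made up for illustration, while DECLARE_BITMAP, set_bit, test_bit and clear_bit are the standard kernel bitops API.

#include <linux/bitops.h>

#define MY_NR_FLAGS     64
static DECLARE_BITMAP(my_flags, MY_NR_FLAGS);   /* 64 bits = two unsigned longs on 32-bit ARM */

void flags_example(void)
{
        set_bit(3, my_flags);           /* atomic read-modify-write of a single bit */
        if (test_bit(3, my_flags))      /* a plain read, no read-modify-write involved */
                clear_bit(3, my_flags);
}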