atomic64_cmpxchg、atomic_cmpxchg都是宏定义:操作宽度由第一个参数(指针)所指向类型的大小决定。如果对第一个参数做强制类型转换,把代码移植到64位系统时很容易踩坑。
问题代码
以下代码移植到arm64后出现BUG:本意是原子地写入一个64位指针,但由于把指针强转成了atomic_t *(counter为32位int),实际只更新了低32位——低32位是想要的值,高32位仍保留原先的值,得到一个被截断的野指针。
struct property *property = NULL;
struct property *result;
result = kmalloc(sizeof(struct property), GFP_KERNEL);
if (atomic_cmpxchg((atomic_t *)&property, 0, (int)result) != 0) {
kfree(result);
}
atomic[64]_t
include/linux/types.h
174 typedef struct {
175 int counter;
176 } atomic_t;
177
178 #ifdef CONFIG_64BIT
179 typedef struct {
180 long counter;
181 } atomic64_t;
182 #endif
arm
arch/arm/include/asm/atomic.h
268 #ifndef CONFIG_GENERIC_ATOMIC64
269 typedef struct {
270 long long counter;
271 } atomic64_t;
272
273 #define ATOMIC64_INIT(i) { (i) }
atomic[64]_cmpxchg
arm64
在arm64上,atomic64_cmpxchg与atomic_cmpxchg实际上是同一个宏,定义位于arch/arm64/include/asm/atomic.h文件。当# CONFIG_ARM64_LSE_ATOMICS is not set时,使用arch/arm64/include/asm/atomic_ll_sc.h中的LL/SC实现。atomic64_cmpxchg、cmpxchg都是宏定义,按32位还是64位操作,由第一个参数(指针)所指向数据类型的大小即sizeof(*(ptr))决定——这正是上面问题代码中强制类型转换导致截断的根源。
120 #define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
196 #define atomic64_cmpxchg atomic_cmpxchg
cmpxchg[64]
cmpxchg64、cmpxchg也是相同的,宏定义位于arch/arm64/include/asm/cmpxchg.h文件。
155 /* cmpxchg */
156 #define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
157 #define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
158 #define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
159 #define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
160 #define cmpxchg_local cmpxchg_relaxed
161
162 /* cmpxchg64 */
163 #define cmpxchg64_relaxed cmpxchg_relaxed
164 #define cmpxchg64_acquire cmpxchg_acquire
165 #define cmpxchg64_release cmpxchg_release
166 #define cmpxchg64 cmpxchg
167 #define cmpxchg64_local cmpxchg_local
__cmpxchg_wrapper
146 #define __cmpxchg_wrapper(sfx, ptr, o, n) \
147 ({ \
148 __typeof__(*(ptr)) __ret; \
149 __ret = (__typeof__(*(ptr))) \
150 __cmpxchg##sfx((ptr), (unsigned long)(o), \
151 (unsigned long)(n), sizeof(*(ptr))); \
152 __ret; \
153 })
154
__cmpxchg_mb
__CMPXCHG_GEN宏会根据指针指向数据类型的尺寸进行不同处理。
117 #define __CMPXCHG_GEN(sfx) \
118 static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
119 unsigned long old, \
120 unsigned long new, \
121 int size) \
122 { \
123 switch (size) { \
124 case 1: \
125 return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
126 case 2: \
127 return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
128 case 4: \
129 return __cmpxchg_case##sfx##_4(ptr, old, new); \
130 case 8: \
131 return __cmpxchg_case##sfx##_8(ptr, old, new); \
132 default: \
133 BUILD_BUG(); \
134 } \
135 \
136 unreachable(); \
137 }
138
139 __CMPXCHG_GEN()
140 __CMPXCHG_GEN(_acq)
141 __CMPXCHG_GEN(_rel)
142 __CMPXCHG_GEN(_mb)
__cmpxchg_case_mb_[1248]
__CMPXCHG_CASE宏,最终定义了cmpxchg的具体实现。
251 #define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
252 __LL_SC_INLINE unsigned long \
253 __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
254 unsigned long old, \
255 unsigned long new)) \
256 { \
257 unsigned long tmp, oldval; \
258 \
259 asm volatile( \
260 " prfm pstl1strm, %[v]\n" \
261 "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
262 " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
263 " cbnz %" #w "[tmp], 2f\n" \
264 " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
265 " cbnz %w[tmp], 1b\n" \
266 " " #mb "\n" \
267 " mov %" #w "[oldval], %" #w "[old]\n" \
268 "2:" \
269 : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
270 [v] "+Q" (*(unsigned long *)ptr) \
271 : [old] "Lr" (old), [new] "r" (new) \
272 : cl); \
273 \
274 return oldval; \
275 } \
276 __LL_SC_EXPORT(__cmpxchg_case_##name);
277
278 __CMPXCHG_CASE(w, b, 1, , , , )
279 __CMPXCHG_CASE(w, h, 2, , , , )
280 __CMPXCHG_CASE(w, , 4, , , , )
281 __CMPXCHG_CASE( , , 8, , , , )
282 __CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
283 __CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
284 __CMPXCHG_CASE(w, , acq_4, , a, , "memory")
285 __CMPXCHG_CASE( , , acq_8, , a, , "memory")
286 __CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
287 __CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
288 __CMPXCHG_CASE(w, , rel_4, , , l, "memory")
289 __CMPXCHG_CASE( , , rel_8, , , l, "memory")
290 __CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
291 __CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
292 __CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
293 __CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
294
295 #undef __CMPXCHG_CASE
generic
CONFIG_GENERIC_ATOMIC64=y定义时,在lib/atomic64.c实现通用的atomic64_cmpxchg函数。
include/linux/atomic.h
include/asm-generic/atomic64.h
152 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
153 {
154 unsigned long flags;
155 raw_spinlock_t *lock = lock_addr(v);
156 long long val;
157
158 raw_spin_lock_irqsave(lock, flags);
159 val = v->counter;
160 if (val == o)
161 v->counter = n;
162 raw_spin_unlock_irqrestore(lock, flags);
163 return val;
164 }
165 EXPORT_SYMBOL(atomic64_cmpxchg);
atomic_long
atomic_long在64位系统上实际为atomic64,32位系统上实际为atomic。
include/asm-generic/atomic-long.h
21 #if BITS_PER_LONG == 64
22
23 typedef atomic64_t atomic_long_t;
24
25 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
26 #define ATOMIC_LONG_PFX(x) atomic64 ## x
27
28 #else
29
30 typedef atomic_t atomic_long_t;
31
32 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
33 #define ATOMIC_LONG_PFX(x) atomic ## x
34
35 #endif