Expansion walkthrough of the Linux kernel per-CPU write macro `__this_cpu_write`:

/* Number of buffer_head slots cached per CPU. */
#define BH_LRU_SIZE    16

/* Per-CPU LRU cache of recently used buffer heads (from fs/buffer.c). */
struct bh_lru {
    struct buffer_head *bhs[BH_LRU_SIZE];
};

/* One bh_lru instance per CPU, all slots initially empty. */
static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

/* Example call site: store bh into slot 0 of this CPU's LRU. */
__this_cpu_write(bh_lrus.bhs[0], bh);

/*
 * Write @val to the current CPU's instance of per-CPU variable @pcp.
 * The "__" variant assumes the caller has already disabled preemption;
 * __this_cpu_preempt_check() only emits a warning (on debug configs)
 * when that assumption is violated — it does not disable anything.
 */
#define __this_cpu_write(pcp, val)                    \
({                                    \
    __this_cpu_preempt_check("write");                \
    raw_cpu_write(pcp, val);                    \
})

/* Dispatch to the size-specific writer raw_cpu_write_1/2/4/8 below. */
#define raw_cpu_write(pcp, val)        __pcpu_size_call(raw_cpu_write_, pcp, val)

/*
 * Expand to stem##<sizeof(variable)>(variable, ...), selecting the
 * 1-, 2-, 4- or 8-byte accessor at compile time based on the operand
 * size.  __verify_pcpu_ptr() is a compile-time (sparse) check that
 * @variable really lives in the per-CPU address space; any other size
 * becomes a link-time error via __bad_size_call_parameter().
 */
#define __pcpu_size_call(stem, variable, ...)                \
do {                                    \
    __verify_pcpu_ptr(&(variable));                    \
    switch(sizeof(variable)) {                    \
        case 1: stem##1(variable, __VA_ARGS__);break;        \
        case 2: stem##2(variable, __VA_ARGS__);break;        \
        case 4: stem##4(variable, __VA_ARGS__);break;        \
        case 8: stem##8(variable, __VA_ARGS__);break;        \
        default:                         \
            __bad_size_call_parameter();break;        \
    }                                \
} while (0)

raw_cpu_write_2

/*
 * Size-dispatched writer: a plain store through the per-CPU pointer.
 * NOTE(review): bhs[0] is a pointer, so sizeof() is 4 or 8 and
 * raw_cpu_write_4/raw_cpu_write_8 would actually be selected for the
 * example above — every size variant expands through
 * raw_cpu_generic_to_op identically, so the walkthrough still holds.
 */
#define raw_cpu_write_2(pcp, val)    raw_cpu_generic_to_op(pcp, val, =)

/*
 * Generic store/update on the current CPU's copy of @pcp: take the
 * per-CPU address of @pcp, translate it to this CPU's instance, and
 * apply @op (here '=', i.e. a plain assignment of @val).
 */
#define raw_cpu_generic_to_op(pcp, val, op)                \
do {                                    \
    *raw_cpu_ptr(&(pcp)) op val;                    \
} while (0)

*raw_cpu_ptr(&(bh_lrus.bhs[0])) = bh;  

/*
 * Translate a per-CPU pointer to the current CPU's instance.
 * NOTE(review): this is presumably the !CONFIG_SMP definition — there
 * is only one copy, so it degenerates to per_cpu_ptr() for cpu 0; the
 * SMP variant applies this CPU's per-CPU offset instead.  Confirm
 * against the kernel config this article targets.
 */
#define raw_cpu_ptr(ptr)    per_cpu_ptr(ptr, 0)

*per_cpu_ptr(&(bh_lrus.bhs[0]), 0) = bh;  

/* Evaluate @cpu only to silence "unused" warnings (no per-CPU offset
 * to add here), then strip the __percpu annotation from @ptr. */
#define per_cpu_ptr(ptr, cpu)    ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })

/*
 * Compile-time check plus cast: verify @__p carries the __percpu
 * sparse address-space annotation, then cast it back to a plain
 * kernel pointer (__kernel __force) so callers can dereference it.
 * Generates no runtime code beyond the pointer value itself.
 */
#define VERIFY_PERCPU_PTR(__p)                        \
({                                    \
    __verify_pcpu_ptr(__p);                        \
    (typeof(*(__p)) __kernel __force *)(__p);            \
})

/*
 * Pure compile-time type check: assigning (typeof((ptr) + 0))NULL to a
 * 'const void __percpu *' makes sparse warn unless @ptr is annotated
 * __percpu.  The '+ 0' decays array arguments to pointers so typeof
 * yields a pointer type.  The (void) cast discards the unused local;
 * no code is generated.
 */
#define __verify_pcpu_ptr(ptr)                        \
do {                                    \
    const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
    (void)__vpp_verify;                        \
} while (0)

std::atomic_thread_fence is a C++ standard-library function that establishes memory-ordering (synchronization) constraints between threads; note that it does not by itself make any operation atomic. It prevents unwanted reordering of memory operations by the compiler or the CPU, which could otherwise lead to race conditions or other synchronization issues. The function takes a memory-order parameter; the four meaningful ones for a fence are: - memory_order_acquire: an acquire fence — memory operations after the fence may not be reordered before reads that precede it; it pairs with a release operation or fence in another thread to make that thread's prior writes visible. - memory_order_release: a release fence — memory operations before the fence may not be reordered after writes that follow it. - memory_order_acq_rel: combines the effects of memory_order_acquire and memory_order_release. - memory_order_seq_cst: an acquire-release fence that additionally participates in the single total order of all sequentially consistent operations. Here is an example of a use case for atomic_thread_fence: ``` #include <atomic> #include <thread> #include <iostream> std::atomic<int> x = {0}; std::atomic<int> y = {0}; void write_x_then_y() { x.store(1, std::memory_order_relaxed); std::atomic_thread_fence(std::memory_order_release); y.store(1, std::memory_order_relaxed); } void read_y_then_x() { while (y.load(std::memory_order_relaxed) != 1); std::atomic_thread_fence(std::memory_order_acquire); if (x.load(std::memory_order_relaxed) == 0) { std::cout << "Reordering observed!\n"; } } int main() { std::thread t1(write_x_then_y); std::thread t2(read_y_then_x); t1.join(); t2.join(); return 0; } ``` In this example, t1 writes x and then y, while t2 spins until it observes y == 1 and then reads x. The release fence in t1 and the acquire fence in t2, paired through the relaxed store and load of y, guarantee that once t2 has seen y == 1 it must also see x == 1, so the message can never print. Without the fences, both relaxed stores (or the relaxed loads) could be reordered, and t2 could observe y == 1 while still reading the stale x == 0.
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值