#include <stdint.h>

#if defined(_MSC_VER)
#include <windows.h>
#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)
#elif defined(_AIX)
#include <sys/atomic_op.h>  /* compare_and_swap, fetch_and_add */
#endif

/* Busy-wait for roughly `delay` iterations, using the x86 PAUSE hint where
 * available to reduce power and pipeline contention while spinning. */
inline void HcPause(int32_t delay) {
#if defined(_M_IX86)
    __asm {
        mov eax, delay
    L1: pause
        add eax, -1
        jne L1
    }
#elif (defined(__linux__) || defined(__FreeBSD__)) && defined(__i386__)
    for (int32_t i = 0; i < delay; i++) {
        __asm__ __volatile__("pause;");
    }
#elif defined(_AIX)
    HcYield();
#endif
}

/* Atomically: if *addr == comparand, store new_value into *addr.
 * Returns the value previously held at *addr. */
inline int32_t AtomicCmpswp(volatile void *addr, int32_t new_value, int32_t comparand) {
#if defined(_M_IX86)
    int32_t result;
    _ReadWriteBarrier();  /* compiler fence */
    __asm {
        mov edx, addr
        mov ecx, new_value
        mov eax, comparand
        lock cmpxchg [edx], ecx
        mov result, eax       /* eax holds the original value of *addr */
    }
    _ReadWriteBarrier();
    return result;
    /* Equivalent: return InterlockedCompareExchange((LPLONG)addr, new_value, comparand); */
#elif defined(_AIX)
    __asm__ __volatile__ ("sync");  /* memory release operation */
    compare_and_swap((atomic_p)addr, &comparand, new_value);
    __asm__ __volatile__ ("sync");  /* memory acquire operation */
    return comparand;  /* now holds the value observed at *addr */
#endif
}

/* Atomically add addend to *addr; returns the value previously held. */
inline int32_t AtomicFetchAdd(volatile void *addr, int32_t addend) {
#if defined(_M_IX86)
    int32_t result;
    __asm {
        mov edx, addr
        mov eax, addend
        lock xadd [edx], eax
        mov result, eax
    }
    return result;
    /* Equivalent: return InterlockedExchangeAdd((LPLONG)addr, addend); */
#elif defined(_AIX)
    int32_t result;
    __asm__ __volatile__ ("sync");  /* memory release operation */
    result = fetch_and_add((atomic_p)addr, addend);
    __asm__ __volatile__ ("sync");  /* memory acquire operation */
    return result;
#endif
}

/* Atomically store value into *addr; returns the value previously held. */
inline int32_t AtomicFetchStore(volatile void *addr, int32_t value) {
#if defined(_M_IX86)
    int32_t result;
    __asm {
        mov edx, addr
        mov eax, value
        lock xchg [edx], eax  /* xchg with a memory operand is implicitly locked */
        mov result, eax
    }
    return result;
    /* Equivalent: return InterlockedExchange((LPLONG)addr, value); */
#endif
}

/* Atomically increment *addr; returns the new (incremented) value. */
inline int32_t AtomicFetchIncr(volatile void *addr) {
#if defined(_WIN32)
    return InterlockedIncrement((LPLONG)addr);
#elif defined(_AIX)
    return (int32_t)fetch_and_add((atomic_p)addr, 1) + 1;
#endif
}

/* Atomically decrement *addr; returns the new (decremented) value. */
inline int32_t AtomicFetchDecr(volatile void *addr) {
#if defined(_WIN32)
    return InterlockedDecrement((LPLONG)addr);
#elif defined(_AIX)
    return (int32_t)fetch_and_add((atomic_p)addr, -1) - 1;
#endif
}
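/*
 * Usage sketch (illustrative, not part of the original header): a minimal
 * test-and-test-and-set spin lock built from the primitives above. The
 * SpinLock type, the function names, and the backoff constant are all
 * hypothetical choices for this example, not an API defined by this file.
 */
typedef struct { volatile int32_t held; } SpinLock;  /* 0 = free, 1 = held */

inline void SpinLockAcquire(SpinLock *lock) {
    for (;;) {
        /* Try to move the lock from 0 (free) to 1 (held). AtomicCmpswp
         * returns the previous value, so 0 means we won the race. */
        if (AtomicCmpswp(&lock->held, 1, 0) == 0)
            return;
        /* Spin with a PAUSE-based backoff while the lock looks held,
         * retrying the atomic only when it appears free again. */
        while (lock->held != 0)
            HcPause(16);
    }
}

inline void SpinLockRelease(SpinLock *lock) {
    /* AtomicFetchStore publishes the release with full-fence semantics. */
    AtomicFetchStore(&lock->held, 0);
}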