Besides the semaphore, the other widely used mutual-exclusion primitive is the spinlock. A spinlock has only two states: locked and unlocked. Unlike a semaphore, where a thread that fails to acquire it goes to sleep, a thread that fails to acquire a spinlock keeps testing the lock in a tight loop ("spinning") until it succeeds.
Because contenders never sleep, the code inside a spinlock's critical section must not sleep or give up the processor (for example, by being preempted by a higher-priority thread or by an interrupt). If the holder slept, every other thread trying to acquire the spinlock would keep spinning without yielding the processor. The critical section must therefore execute atomically.
Code holding a spinlock will not be preempted by other threads, so a spinlock must be held for only a very short time.
When using locks (semaphores or spinlocks), take care not to cause deadlock. Common deadlock patterns (see the ordering sketch below):
1) code that already holds a lock calls a function that tries to acquire the same lock
2) two threads each need locks a and b, but acquire them in opposite orders
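A minimal sketch of case 2 and its standard fix (the lock names here are invented for illustration): every code path that needs both locks must acquire them in the same global order.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

/* Correct: every path that needs both locks takes lock_a first. */
static void update_both(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);
        /* ... touch data guarded by both locks ... */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
}

/* A second path taking lock_b before lock_a could block forever
 * against update_both() -- the AB-BA pattern of case 2 above. */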
Using a spinlock (a fuller usage sketch follows the list):
1) Definition and initialization
spinlock_t mylock = SPIN_LOCK_UNLOCKED;  (deprecated; the header below recommends DEFINE_SPINLOCK())
or
spin_lock_init(lock);
2) Locking a spinlock
spin_lock(lock);
spin_lock_irq(lock);
spin_lock_bh(lock);
spin_lock_irqsave(lock, flags);
3) Unlocking a spinlock
spin_unlock(lock);
spin_unlock_irq(lock);
spin_unlock_bh(lock);
spin_unlock_irqrestore(lock, flags);
4) Non-blocking variants (return nonzero on success instead of spinning)
spin_trylock(lock);
spin_trylock_bh(lock);
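A minimal usage sketch, with invented names: a counter shared between process context and an interrupt handler. The _irqsave variant is used on the process side, so the ISR cannot interrupt the lock holder on the same CPU and spin on the lock forever.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(dev_lock);
static unsigned long packet_count;        /* shared with the ISR below */

static void count_packet(void)            /* process context */
{
        unsigned long flags;

        spin_lock_irqsave(&dev_lock, flags);
        packet_count++;                   /* short, non-sleeping critical section */
        spin_unlock_irqrestore(&dev_lock, flags);
}

static irqreturn_t dev_isr(int irq, void *dev_id)  /* interrupt context */
{
        spin_lock(&dev_lock);             /* IRQs already disabled here */
        packet_count++;
        spin_unlock(&dev_lock);
        return IRQ_HANDLED;
}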
Reader/writer spinlocks
Reader/writer spinlocks relate to ordinary spinlocks exactly as reader/writer semaphores relate to semaphores, and they are used the same way (a usage sketch follows the API list).
1) Definition and initialization
rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
or
rwlock_init(lock)
2) Read lock
read_lock(lock)
read_lock_irqsave(lock, flags)
read_lock_irq(lock)
read_lock_bh(lock)
read_unlock(lock)
read_unlock_irqrestore(lock, flags)
read_unlock_irq(lock)
read_unlock_bh(lock)
3) Write lock
write_lock(lock)
write_lock_irqsave(lock, flags)
write_lock_irq(lock)
write_lock_bh(lock)
write_unlock(lock)
write_unlock_irqrestore(lock, flags)
write_unlock_irq(lock)
write_unlock_bh(lock)
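A brief sketch with invented names: many readers may enter the critical section concurrently, while a writer excludes everyone.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(cfg_lock);
static int cfg_value;                     /* read often, written rarely */

static int cfg_get(void)
{
        int v;

        read_lock(&cfg_lock);             /* shared with other readers */
        v = cfg_value;
        read_unlock(&cfg_lock);
        return v;
}

static void cfg_set(int v)
{
        write_lock(&cfg_lock);            /* exclusive */
        cfg_value = v;
        write_unlock(&cfg_lock);
}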
The <linux/spinlock.h> header:
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_type_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the __raw_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/system.h>
/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the raw_spinlock_t and raw_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);

/*
 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
                               struct lock_class_key *key);
# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __spin_lock_init((lock), #lock, &__key);                \
} while (0)

#else
# define spin_lock_init(lock)                                   \
        do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __rwlock_init(rwlock_t *lock, const char *name,
                            struct lock_class_key *key);
# define rwlock_init(lock)                                      \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __rwlock_init((lock), #lock, &__key);                   \
} while (0)
#else
# define rwlock_init(lock)                                      \
        do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
#endif

#define spin_is_locked(lock)    __raw_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef __raw_spin_is_contended
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif

/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define spin_unlock_wait(lock)  __raw_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern void _raw_read_lock(rwlock_t *lock);
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
 extern void _raw_write_lock(rwlock_t *lock);
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_lock(lock)           __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
                __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)        __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)         __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)         __raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_lock_flags(lock, flags) \
                __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_read_trylock(rwlock)      __raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)       __raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)        __raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_lock_flags(lock, flags) \
                __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_write_trylock(rwlock)     __raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)      __raw_write_unlock(&(rwlock)->raw_lock)
#endif

#define read_can_lock(rwlock)           __raw_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock)          __raw_write_can_lock(&(rwlock)->raw_lock)

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
 * methods are defined as nops in the case they are not required.
 */
#define spin_trylock(lock)              __cond_lock(lock, _spin_trylock(lock))
#define read_trylock(lock)              __cond_lock(lock, _read_trylock(lock))
#define write_trylock(lock)             __cond_lock(lock, _write_trylock(lock))

#define spin_lock(lock)                 _spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
# define spin_lock_nest_lock(lock, nest_lock)                           \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);     \
         } while (0)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif

#define write_lock(lock)                _write_lock(lock)
#define read_lock(lock)                 _read_lock(lock)

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define spin_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _spin_lock_irqsave(lock);       \
        } while (0)
#define read_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _read_lock_irqsave(lock);       \
        } while (0)
#define write_lock_irqsave(lock, flags)                 \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _write_lock_irqsave(lock);      \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _spin_lock_irqsave_nested(lock, subclass);      \
        } while (0)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _spin_lock_irqsave(lock);                       \
        } while (0)
#endif

#else

#define spin_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _spin_lock_irqsave(lock, flags);        \
        } while (0)
#define read_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _read_lock_irqsave(lock, flags);        \
        } while (0)
#define write_lock_irqsave(lock, flags)                 \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _write_lock_irqsave(lock, flags);       \
        } while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
        spin_lock_irqsave(lock, flags)

#endif

#define spin_lock_irq(lock)             _spin_lock_irq(lock)
#define spin_lock_bh(lock)              _spin_lock_bh(lock)
#define read_lock_irq(lock)             _read_lock_irq(lock)
#define read_lock_bh(lock)              _read_lock_bh(lock)
#define write_lock_irq(lock)            _write_lock_irq(lock)
#define write_lock_bh(lock)             _write_lock_bh(lock)
#define spin_unlock(lock)               _spin_unlock(lock)
#define read_unlock(lock)               _read_unlock(lock)
#define write_unlock(lock)              _write_unlock(lock)
#define spin_unlock_irq(lock)           _spin_unlock_irq(lock)
#define read_unlock_irq(lock)           _read_unlock_irq(lock)
#define write_unlock_irq(lock)          _write_unlock_irq(lock)

#define spin_unlock_irqrestore(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _spin_unlock_irqrestore(lock, flags);   \
        } while (0)
#define spin_unlock_bh(lock)            _spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _read_unlock_irqrestore(lock, flags);   \
        } while (0)
#define read_unlock_bh(lock)            _read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)            \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _write_unlock_irqrestore(lock, flags);  \
        } while (0)
#define write_unlock_bh(lock)           _write_unlock_bh(lock)

#define spin_trylock_bh(lock)   __cond_lock(lock, _spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

#define write_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        write_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define spin_can_lock(lock)     (!spin_is_locked(lock))

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

#endif /* __LINUX_SPINLOCK_H */
The <linux/spinlock_types.h> header:
#ifndef __LINUX_SPINLOCK_TYPES_H
#define __LINUX_SPINLOCK_TYPES_H
/*
* include/linux/spinlock_types.h - generic spinlock type definitions
* and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
# include <linux/spinlock_types_up.h>
#endif
#include <linux/lockdep.h>
typedef struct {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
typedef struct {
raw_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
#else
# define SPIN_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
RW_DEP_MAP_INIT(lockname) }
#else
# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
RW_DEP_MAP_INIT(lockname) }
#endif
/*
* SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
* are hence deprecated.
* Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
* __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_SPINLOCK_TYPES_H */
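As the header's own comment notes, SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep tracking and are deprecated; new code defines locks like this:

static DEFINE_SPINLOCK(my_lock);    /* spinlock_t my_lock = __SPIN_LOCK_UNLOCKED(my_lock) */
static DEFINE_RWLOCK(my_rwlock);    /* rwlock_t my_rwlock = __RW_LOCK_UNLOCKED(my_rwlock) */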
seqlock
Seqlocks suit situations with many reads and few writes, but the protected data must not contain pointers (a writer could invalidate a pointer a reader is following).
1) Definition and initialization
seqlock_t my_seqlock = SEQLOCK_UNLOCKED;
or
seqlock_init(x)
2) Reading
do {
        seq = read_seqbegin(&my_seqlock);
        /* read the protected data */
} while (read_seqretry(&my_seqlock, seq));
read_seqbegin(lock)
read_seqbegin_irqsave(lock, flags)
read_seqretry_irqrestore(lock, iv, flags)
3) Writing (a usage sketch follows the macro list below)
void write_seqlock(seqlock_t *sl)
void write_sequnlock(seqlock_t *sl)
int write_tryseqlock(seqlock_t *sl)
#define write_seqlock_irqsave(lock, flags) \
do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
do { local_bh_disable(); write_seqlock(lock); } while (0)
#define write_sequnlock_irqrestore(lock, flags) \
do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
#define write_sequnlock_irq(lock) \
do { write_sequnlock(lock); local_irq_enable(); } while(0)
#define write_sequnlock_bh(lock) \
do { write_sequnlock(lock); local_bh_enable(); } while(0)
#define read_seqbegin_irqsave(lock, flags) \
({ local_irq_save(flags); read_seqbegin(lock); })
#define read_seqretry_irqrestore(lock, iv, flags) \
	({ \
		int ret = read_seqretry(lock, iv); \
		local_irq_restore(flags); \
		ret; \
	})
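A short sketch, with invented names, of the typical write/read pairing: the writer takes the internal spinlock and bumps the sequence counter, while readers never block and simply retry if a write raced with them.

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(stamp_lock);
static u64 stamp;                         /* read-mostly value, no pointers */

static void stamp_update(u64 now)         /* rare writer */
{
        write_seqlock(&stamp_lock);
        stamp = now;
        write_sequnlock(&stamp_lock);
}

static u64 stamp_get(void)                /* frequent, lockless reader */
{
        unsigned seq;
        u64 v;

        do {
                seq = read_seqbegin(&stamp_lock);
                v = stamp;
        } while (read_seqretry(&stamp_lock, seq));

        return v;
}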
The <linux/seqlock.h> header:
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
* Reader/writer consistent mechanism without starving writers. This type of
* lock for data where the reader wants a consistent set of information
* and is willing to retry if the information changes. Readers never
* block but they may have to retry if a writer is in
* progress. Writers do not wait for readers.
*
* This is not as cache friendly as brlock. Also, this will not work
* for data that contains pointers, because any writer could
* invalidate a pointer that a reader was following.
*
* Expected reader usage:
* do {
* seq = read_seqbegin(&foo);
* ...
* } while (read_seqretry(&foo, seq));
*
*
* On non-SMP the spin locks disappear but the writer still needs
* to increment the sequence variables because an interrupt routine could
* change the state of the data.
*
* Based on x86_64 vsyscall gettimeofday
* by Keith Owens and Andrea Arcangeli
*/
#include <linux/spinlock.h>
#include <linux/preempt.h>
typedef struct {
unsigned sequence;
spinlock_t lock;
} seqlock_t;
/*
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
#define __SEQLOCK_UNLOCKED(lockname) \
{ 0, __SPIN_LOCK_UNLOCKED(lockname) }
#define SEQLOCK_UNLOCKED \
__SEQLOCK_UNLOCKED(old_style_seqlock_init)
#define seqlock_init(x) \
do { \
(x)->sequence = 0; \
spin_lock_init(&(x)->lock); \
} while (0)
#define DEFINE_SEQLOCK(x) \
seqlock_t x = __SEQLOCK_UNLOCKED(x)
/* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
* Don't need preempt_disable() because that is in the spin_lock already.
*/
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
++sl->sequence;
smp_wmb();
}
static inline void write_sequnlock(seqlock_t *sl)
{
smp_wmb();
sl->sequence++;
spin_unlock(&sl->lock);
}
static inline int write_tryseqlock(seqlock_t *sl)
{
int ret = spin_trylock(&sl->lock);
if (ret) {
++sl->sequence;
smp_wmb();
}
return ret;
}
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret;
repeat:
ret = ACCESS_ONCE(sl->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
}
smp_rmb();
return ret;
}
/*
* Test if reader processed invalid data.
*
* If sequence value changed then writer changed data while in section.
*/
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
smp_rmb();
return unlikely(sl->sequence != start);
}
/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
* updating starting before the write_seqcount_begin() and ending
* after the write_seqcount_end().
*/
typedef struct seqcount {
unsigned sequence;
} seqcount_t;
#define SEQCNT_ZERO { 0 }
#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
/**
* __read_seqcount_begin - begin a seq-read critical section (without barrier)
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
* provided before actually loading any of the variables that are to be
* protected in this critical section.
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
*/
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
unsigned ret;
repeat:
ret = s->sequence;
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
}
return ret;
}
/**
* read_seqcount_begin - begin a seq-read critical section
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
*
* read_seqcount_begin opens a read critical section of the given seqcount.
* Validity of the critical section is tested by checking read_seqcount_retry
* function.
*/
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
unsigned ret = __read_seqcount_begin(s);
smp_rmb();
return ret;
}
/**
* raw_seqcount_begin - begin a seq-read critical section
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
*
* raw_seqcount_begin opens a read critical section of the given seqcount.
* Validity of the critical section is tested by checking read_seqcount_retry
* function.
*
* Unlike read_seqcount_begin(), this function will not wait for the count
* to stabilize. If a writer is active when we begin, we will fail the
* read_seqcount_retry() instead of stabilizing at the beginning of the
* critical section.
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
/**
* __read_seqcount_retry - end a seq-read critical section (without barrier)
* @s: pointer to seqcount_t
* @start: count, from read_seqcount_begin
* Returns: 1 if retry is required, else 0
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
* provided before actually loading any of the variables that are to be
* protected in this critical section.
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
return unlikely(s->sequence != start);
}
/**
* read_seqcount_retry - end a seq-read critical section
* @s: pointer to seqcount_t
* @start: count, from read_seqcount_begin
* Returns: 1 if retry is required, else 0
*
* read_seqcount_retry closes a read critical section of the given seqcount.
* If the critical section was invalid, it must be ignored (and typically
* retried).
*/
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
return __read_seqcount_retry(s, start);
}
/*
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
static inline void write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
static inline void write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
* After write_seqcount_barrier, no read-side seq operations will complete
* successfully and see data older than this.
*/
static inline void write_seqcount_barrier(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
}
/*
* Possible sw/hw IRQ protected versions of the interfaces.
*/
#define write_seqlock_irqsave(lock, flags) \
do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
do { local_bh_disable(); write_seqlock(lock); } while (0)
#define write_sequnlock_irqrestore(lock, flags) \
do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
#define write_sequnlock_irq(lock) \
do { write_sequnlock(lock); local_irq_enable(); } while(0)
#define write_sequnlock_bh(lock) \
do { write_sequnlock(lock); local_bh_enable(); } while(0)
#define read_seqbegin_irqsave(lock, flags) \
({ local_irq_save(flags); read_seqbegin(lock); })
#define read_seqretry_irqrestore(lock, iv, flags) \
({ \
int ret = read_seqretry(lock, iv); \
local_irq_restore(flags); \
ret; \
})
#endif /* __LINUX_SEQLOCK_H */
Ways to avoid using locks altogether:
1) Circular buffers: mailbox mechanisms, named pipes, and similar producer/consumer circular buffers (with a single producer and a single consumer, the read and write indices can be updated without a lock)
2) The atomic_t data type and its operation functions in <asm/atomic.h> (a small sketch follows)
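A small sketch of method 2, with invented names: a single shared counter managed with atomic operations, so no lock is ever taken. This is the classic "single opener" pattern for a device driver.

#include <linux/errno.h>
#include <asm/atomic.h>                   /* newer kernels: <linux/atomic.h> */

static atomic_t open_count = ATOMIC_INIT(0);

/* Allow only one opener at a time, without any lock. */
static int dev_open_once(void)
{
        if (atomic_inc_return(&open_count) > 1) {
                atomic_dec(&open_count);  /* undo our increment */
                return -EBUSY;
        }
        return 0;
}

static void dev_release_once(void)
{
        atomic_dec(&open_count);
}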