Implementing a read-write lock (SRW lock) by hand

This article walks through a hand-rolled read-write lock in C, in the style of the Windows slim reader/writer (SRW) lock. It defines and initializes an srwlock structure and implements srwlock_rdlock, srwlock_rdunlock, srwlock_wrlock and srwlock_wrunlock to acquire and release the lock in shared and exclusive mode. The implementation relies on atomic operations, spinning and wait blocks to remain correct under concurrency. A much simpler Python reader-writer lock built on threading.Lock is shown at the end.
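
The code below relies on a few atomic and spin primitives — cmpxchg, atomic_bitsetandtest, atomic_clear_bit, barrier and cpu_relax — that the article never defines. A minimal sketch of what they might look like with GCC builtins on x86 follows; this is an assumption for readability, any equivalent atomics would work, and errno.h supplies the EBUSY used by the trylock paths:

```c
#include <stdint.h>
#include <errno.h>   /* EBUSY, used by the trylock functions */

#ifndef TRUE
#define TRUE  1
#define FALSE 0
#endif

/* Compiler barrier: keep the compiler from reordering memory accesses. */
#define barrier() __asm__ __volatile__("" ::: "memory")

/* Spin-wait hint for the CPU (x86 PAUSE instruction). */
#define cpu_relax() __asm__ __volatile__("pause" ::: "memory")

/* Compare-and-swap: if *ptr == old, store new_val; always return the value read. */
static inline uintptr_t cmpxchg(uintptr_t *ptr, uintptr_t old, uintptr_t new_val)
{
    return __sync_val_compare_and_swap(ptr, old, new_val);
}

/* Atomically set bit 'bit' of *ptr and return its previous value (0 or 1). */
static inline int atomic_bitsetandtest(uintptr_t *ptr, int bit)
{
    uintptr_t mask = (uintptr_t)1 << bit;
    return (__sync_fetch_and_or(ptr, mask) & mask) != 0;
}

/* Atomically clear bit 'bit' of *ptr. */
static inline void atomic_clear_bit(uintptr_t *ptr, int bit)
{
    __sync_fetch_and_and(ptr, ~((uintptr_t)1 << bit));
}
```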

```c
/* Have a wait block */
#define SRWLOCK_WAIT 1

/* Users are readers */
#define SRWLOCK_SHARED 2

/* Bit-lock for editing the wait block */
#define SRWLOCK_LOCK 4
#define SRWLOCK_LOCK_BIT 2

/* Mask for the above bits */
#define SRWLOCK_MASK 7

/* Number of current users * 8 */
#define SRWLOCK_USERS 8

typedef struct srwlock srwlock;
struct srwlock
{
    uintptr_t p;
};
```
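
As a quick sanity check on this bit layout, here is a small helper of my own (not part of the original code) that decodes the lock word while no wait block is chained; when SRWLOCK_WAIT is set, the bits above SRWLOCK_MASK hold a pointer to the first wait block instead of a count:

```c
/* Illustration only: with SRWLOCK_WAIT clear, the bits above SRWLOCK_MASK
 * hold the user count. Three readers give p == 3 * SRWLOCK_USERS | SRWLOCK_SHARED
 * == 26; a single uncontended writer gives p == SRWLOCK_USERS == 8. */
static uintptr_t srwlock_user_count(const srwlock *l)
{
    uintptr_t p = l->p;
    return (p & SRWLOCK_WAIT) ? 0 : (p & ~(uintptr_t)SRWLOCK_MASK) / SRWLOCK_USERS;
}
```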

```c
typedef struct srw_sw srw_sw;
struct srw_sw
{
    uintptr_t spin;
    srw_sw *next;
};

typedef struct srw_wb srw_wb;
struct srw_wb
{
    /* s_count is the number of shared acquirers * SRWLOCK_USERS. */
    uintptr_t s_count;

    /* Last points to the last wait block in the chain. The value
     * is only valid when read from the first wait block. */
    srw_wb *last;

    /* Next points to the next wait block in the chain. */
    srw_wb *next;

    /* The wake chain is only valid for shared wait blocks */
    srw_sw *wake;
    srw_sw *last_shared;

    int ex;
};
```

```c
/* Wait for control of the wait block */
static srw_wb *lock_wb(srwlock *l)
{
    uintptr_t p;

    /* Spin on the wait block bit lock */
    while (atomic_bitsetandtest(&l->p, SRWLOCK_LOCK_BIT)) cpu_relax();

    p = l->p;
    barrier();

    if (!(p & SRWLOCK_WAIT))
    {
        /* Oops, looks like the wait block was removed. */
        atomic_clear_bit(&l->p, SRWLOCK_LOCK_BIT);
        return NULL;
    }

    return (srw_wb *)(p & ~SRWLOCK_MASK);
}
```

```c
static void srwlock_init(srwlock *l)
{
    l->p = 0;
}
```

```c
static void srwlock_rdlock(srwlock *l)
{
    srw_wb swblock;
    srw_sw sw;
    uintptr_t p;
    srw_wb *wb, *shared;

    while (1)
    {
        barrier();
        p = l->p;
        cpu_relax();

        if (!p)
        {
            /* This is a fast path, we can simply try to set the shared count to 1 */
            if (!cmpxchg(&l->p, 0, SRWLOCK_USERS | SRWLOCK_SHARED)) return;
            continue;
        }

        /* Don't interfere with locking */
        if (p & SRWLOCK_LOCK) continue;

        if (p & SRWLOCK_SHARED)
        {
            if (!(p & SRWLOCK_WAIT))
            {
                /* This is a fast path, just increment the number of current shared locks */
                if (cmpxchg(&l->p, p, p + SRWLOCK_USERS) == p) return;
            }
            else
            {
                /* There are other waiters already, lock the wait blocks and increment the shared count */
                wb = lock_wb(l);
                if (wb) break;
            }
            continue;
        }

        /* Initialize wait block */
        swblock.ex = FALSE;
        swblock.next = NULL;
        swblock.last = &swblock;
        swblock.wake = &sw;

        sw.next = NULL;
        sw.spin = 0;

        if (!(p & SRWLOCK_WAIT))
        {
            /* We need to set up the first wait block.
             * Currently an exclusive lock is held, change the lock to contended mode. */
            swblock.s_count = SRWLOCK_USERS;
            swblock.last_shared = &sw;

            if (cmpxchg(&l->p, p, (uintptr_t)&swblock | SRWLOCK_WAIT) == p)
            {
                while (!sw.spin) cpu_relax();
                return;
            }
            continue;
        }

        /* Handle the contended but not shared case.
         * There are other waiters already, lock the wait blocks and increment the shared count.
         * If the last block in the chain is an exclusive lock, add another block. */
        swblock.s_count = 0;

        wb = lock_wb(l);
        if (!wb) continue;

        shared = wb->last;
        if (shared->ex)
        {
            shared->next = &swblock;
            wb->last = &swblock;
            shared = &swblock;
        }
        else
        {
            shared->last_shared->next = &sw;
        }

        shared->s_count += SRWLOCK_USERS;
        shared->last_shared = &sw;

        /* Unlock */
        barrier();
        l->p &= ~SRWLOCK_LOCK;

        /* Wait to be woken */
        while (!sw.spin) cpu_relax();
        return;
    }

    /* The contended and shared case */
    sw.next = NULL;
    sw.spin = 0;

    if (wb->ex)
    {
        /* We need to set up a new wait block.
         * Although we're currently in a shared lock and we're acquiring
         * a shared lock, there are exclusive locks queued in between.
         * We need to wait until those are released. */
        shared = wb->last;
        if (shared->ex)
        {
            swblock.ex = FALSE;
            swblock.s_count = SRWLOCK_USERS;
            swblock.next = NULL;
            swblock.last = &swblock;
            swblock.wake = &sw;
            swblock.last_shared = &sw;
            shared->next = &swblock;
            wb->last = &swblock;
        }
        else
        {
            shared->last_shared->next = &sw;
            shared->s_count += SRWLOCK_USERS;
            shared->last_shared = &sw;
        }
    }
    else
    {
        wb->last_shared->next = &sw;
        wb->s_count += SRWLOCK_USERS;
        wb->last_shared = &sw;
    }

    /* Unlock */
    barrier();
    l->p &= ~SRWLOCK_LOCK;

    /* Wait to be woken */
    while (!sw.spin) cpu_relax();
}
```

```c
static void srwlock_rdunlock(srwlock *l)
{
    uintptr_t p, np;
    srw_wb *wb;
    srw_wb *next;

    while (1)
    {
        barrier();
        p = l->p;
        cpu_relax();

        if (p & SRWLOCK_WAIT)
        {
            /* There's a wait block, we need to wake a pending exclusive acquirer
             * if this is the last shared release. */
            wb = lock_wb(l);
            if (wb) break;
            continue;
        }

        /* Don't interfere with locking */
        if (p & SRWLOCK_LOCK) continue;

        /* This is a fast path, we can simply decrement the shared
         * count and store the pointer */
        np = p - SRWLOCK_USERS;

        /* If we are the last reader, then the lock is unused */
        if (np == SRWLOCK_SHARED) np = 0;

        /* Try to release the lock */
        if (cmpxchg(&l->p, p, np) == p) return;
    }

    wb->s_count -= SRWLOCK_USERS;

    if (wb->s_count)
    {
        /* Unlock */
        barrier();
        l->p &= ~SRWLOCK_LOCK;
        return;
    }

    next = wb->next;
    if (next)
    {
        /* There are more blocks chained, we need to update the pointers
         * in the next wait block and update the wait block pointer. */
        np = (uintptr_t)next | SRWLOCK_WAIT;
        next->last = wb->last;
    }
    else
    {
        /* Convert the lock to a simple exclusive lock. */
        np = SRWLOCK_USERS;
    }

    barrier();
    /* This also unlocks the wait block bit lock */
    l->p = np;
    barrier();

    wb->wake = (void *)1;
    barrier();

    /* We released the lock */
}
```

```c
static int srwlock_rdtrylock(srwlock *s)
{
    uintptr_t p = s->p;

    barrier();

    /* This is a fast path, we can simply try to set the shared count to 1 */
    if (!p && (cmpxchg(&s->p, 0, SRWLOCK_USERS | SRWLOCK_SHARED) == 0)) return 0;

    if ((p & (SRWLOCK_SHARED | SRWLOCK_WAIT)) == SRWLOCK_SHARED)
    {
        /* This is a fast path, just increment the number of current shared locks */
        if (cmpxchg(&s->p, p, p + SRWLOCK_USERS) == p) return 0;
    }

    return EBUSY;
}
```

```c
static void srwlock_wrlock(srwlock *l)
{
    srw_wb swblock;
    uintptr_t p, np;

    /* Fast path - no other readers or writers */
    if (!l->p && (!cmpxchg(&l->p, 0, SRWLOCK_USERS))) return;

    /* Initialize wait block */
    swblock.ex = TRUE;
    swblock.next = NULL;
    swblock.last = &swblock;
    swblock.wake = NULL;

    while (1)
    {
        barrier();
        p = l->p;
        cpu_relax();

        if (p & SRWLOCK_WAIT)
        {
            srw_wb *wb = lock_wb(l);
            if (!wb) continue;

            /* Complete initialization of the block */
            swblock.s_count = 0;

            wb->last->next = &swblock;
            wb->last = &swblock;

            /* Unlock */
            barrier();
            l->p &= ~SRWLOCK_LOCK;

            /* Wait until our wait block becomes the first one in the chain */
            while (!swblock.wake) cpu_relax();
            return;
        }

        /* Fast path - no other readers or writers */
        if (!p)
        {
            if (!cmpxchg(&l->p, 0, SRWLOCK_USERS)) return;
            continue;
        }

        /* Don't interfere with locking */
        if (p & SRWLOCK_LOCK) continue;

        /* There are no wait blocks so far, we need to add ourselves as the first wait block. */
        if (p & SRWLOCK_SHARED)
        {
            swblock.s_count = p & ~SRWLOCK_MASK;
            np = (uintptr_t)&swblock | SRWLOCK_SHARED | SRWLOCK_WAIT;
        }
        else
        {
            swblock.s_count = 0;
            np = (uintptr_t)&swblock | SRWLOCK_WAIT;
        }

        /* Try to make the change */
        if (cmpxchg(&l->p, p, np) == p) break;
    }

    /* Wait until our wait block becomes the first one in the chain */
    while (!swblock.wake) cpu_relax();
}
```

```c
static void srwlock_wrunlock(srwlock *l)
{
    uintptr_t p, np;
    srw_wb *wb;
    srw_wb *next;
    srw_sw *wake, *wake_next;

    while (1)
    {
        barrier();
        p = l->p;
        cpu_relax();

        if (p == SRWLOCK_USERS)
        {
            /* This is the fast path, we can simply clear the SRWLOCK_USERS bit.
             * All other bits should be 0 now because this is a simple exclusive lock,
             * and no one else is waiting. */
            if (cmpxchg(&l->p, SRWLOCK_USERS, 0) == SRWLOCK_USERS) return;
            continue;
        }

        /* There's a wait block, we need to wake the next pending acquirer */
        wb = lock_wb(l);
        if (wb) break;
    }

    next = wb->next;
    if (next)
    {
        /* There are more blocks chained, we need to update the pointers
         * in the next wait block and update the wait block pointer. */
        np = (uintptr_t)next | SRWLOCK_WAIT;

        if (!wb->ex)
        {
            /* Save the shared count */
            next->s_count = wb->s_count;
            np |= SRWLOCK_SHARED;
        }

        next->last = wb->last;
    }
    else
    {
        /* Convert the lock to a simple lock. */
        if (wb->ex)
        {
            np = SRWLOCK_USERS;
        }
        else
        {
            np = wb->s_count | SRWLOCK_SHARED;
        }
    }

    barrier();
    /* This also unlocks the lock bit */
    l->p = np;
    barrier();

    if (wb->ex)
    {
        barrier();
        /* Notify the next waiter */
        wb->wake = (void *)1;
        barrier();
        return;
    }

    /* We now need to wake all the other waiters required. */
    for (wake = wb->wake; wake; wake = wake_next)
    {
        barrier();
        wake_next = wake->next;
        barrier();
        wake->spin = 1;
        barrier();
    }
}
```

```c
static int srwlock_wrtrylock(srwlock *s)
{
    /* No other readers or writers? */
    if (!s->p && (cmpxchg(&s->p, 0, SRWLOCK_USERS) == 0)) return 0;

    return EBUSY;
}
```
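
To exercise the lock, a minimal pthreads driver might look like the following; the thread counts and the shared counter are arbitrary choices for illustration only:

```c
#include <pthread.h>
#include <stdio.h>

static srwlock lock;
static int shared_value;

static void *reader(void *arg)
{
    (void)arg;
    srwlock_rdlock(&lock);
    printf("read %d\n", shared_value);   /* many readers may run here concurrently */
    srwlock_rdunlock(&lock);
    return NULL;
}

static void *writer(void *arg)
{
    (void)arg;
    srwlock_wrlock(&lock);
    shared_value++;                      /* exactly one writer at a time */
    srwlock_wrunlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t[6];
    int i;

    srwlock_init(&lock);

    for (i = 0; i < 4; i++) pthread_create(&t[i], NULL, reader, NULL);
    for (i = 4; i < 6; i++) pthread_create(&t[i], NULL, writer, NULL);
    for (i = 0; i < 6; i++) pthread_join(t[i], NULL);

    printf("final value: %d\n", shared_value);
    return 0;
}
```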

A read-write lock is a synchronization primitive for multithreaded programs: it lets many threads read a shared resource at the same time, but allows only one thread at a time to write to it. In Python, a simple reader-writer lock can be built from the Lock class in the threading module:

```python
import threading

class RWLock:
    """Readers-writer lock: many concurrent readers, one exclusive writer."""

    def __init__(self):
        self._lock = threading.Lock()        # protects _read_count
        self._write_lock = threading.Lock()  # held while readers or a writer are active
        self._read_count = 0

    def acquire_read(self):
        with self._lock:
            self._read_count += 1
            if self._read_count == 1:
                # The first reader locks writers out.
                self._write_lock.acquire()

    def release_read(self):
        with self._lock:
            self._read_count -= 1
            if self._read_count == 0:
                # The last reader lets writers in again.
                self._write_lock.release()

    def acquire_write(self):
        self._write_lock.acquire()

    def release_write(self):
        self._write_lock.release()
```

The RWLock class exposes four methods: acquire_read and release_read take and release the read lock, while acquire_write and release_write take and release the write lock. Using it looks like this:

```python
lock = RWLock()

def read_func():
    lock.acquire_read()
    # ... read the shared resource ...
    lock.release_read()

def write_func():
    lock.acquire_write()
    # ... write to the shared resource ...
    lock.release_write()

# Create several reader threads and a couple of writer threads
read_threads = [threading.Thread(target=read_func) for _ in range(5)]
write_threads = [threading.Thread(target=write_func) for _ in range(2)]

# Start the threads
for thread in read_threads + write_threads:
    thread.start()

# Wait for all of them to finish
for thread in read_threads + write_threads:
    thread.join()
```

Here we create five reader threads and two writer threads, start them with start(), and then wait for all of them to complete with join().