// A spinlock implemented with CAS (compare-and-swap) -- 基于CAS实现的自旋锁
#ifndef _X_FREE_LOCK_H
#define _X_FREE_LOCK_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
// Spinlock built on the GCC __sync compare-and-swap builtins.
// A waiter busy-spins, so this is only appropriate when the critical
// section is very short; long holds waste CPU time on the spinning core.
typedef struct
{
    int m_lock; // 0 = unlocked, 1 = locked

    // Put the lock into the unlocked state. Must be called before first use.
    inline void spinlock_init()
    {
        m_lock = 0;
    }

    // Acquire the lock (test-and-test-and-set).
    // The outer CAS attempts 0 -> 1; while someone else holds the lock we
    // spin on a plain volatile read instead of repeating the CAS, which
    // avoids hammering the cache line with read-modify-write traffic.
    inline void spinlock_lock()
    {
        while (!__sync_bool_compare_and_swap(&m_lock, 0, 1))
        {
            while (*(volatile int*)&m_lock != 0) {}
        }
    }

    // Release the lock: __sync_lock_release stores 0 with release semantics.
    inline void spinlock_unlock()
    {
        __sync_lock_release(&m_lock);
    }
} spinlock_t;
// RAII guard for spinlock_t: acquires the lock on construction and
// releases it in the destructor, so the lock is dropped on every exit
// path (including early returns and exceptions).
class AutoReleaseLock
{
public:
    // Takes a non-owning pointer to an initialized spinlock; 'lock' must
    // outlive this guard.
    explicit AutoReleaseLock(spinlock_t* lock)
    {
        lock->spinlock_lock();
        m_lock = lock;
    }
    ~AutoReleaseLock()
    {
        m_lock->spinlock_unlock();
    }
    // BUGFIX: the guard was copyable; copying it would release the same
    // spinlock twice. Guards must be non-copyable.
    AutoReleaseLock(const AutoReleaseLock&) = delete;
    AutoReleaseLock& operator=(const AutoReleaseLock&) = delete;
private:
    spinlock_t* m_lock; // non-owning
};
#endif
// A lock-free queue implemented with CAS -- 基于CAS实现的无锁队列
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include "x_freeLock.h"
#define SHM_NAME_LEN 128
#define MIN(a, b) ((a) > (b) ? (b) : (a))
#define IS_POT(x) ((x) && !((x) & ((x)-1)))
#define MEMORY_BARRIER __sync_synchronize()
// Fixed-capacity ring-buffer queue.
//   T      : element type (copied with memcpy, so it should be trivially
//            copyable -- TODO confirm callers only use POD types).
//   NOLOCK : true  => caller guarantees single-producer/single-consumer,
//                     so no spinlock is taken (see SingleQueue);
//            false => operations are guarded by an internal spinlock.
// Compile-time switches: USE_POT rounds the capacity to a power of two and
// uses masking instead of '%'; USE_MB inserts a full memory barrier
// between writing the payload and publishing the index.
template <class T, bool NOLOCK = false>
class LockFreeQueue
{
public:
    // size: capacity of the ring (one slot is always kept empty to tell
    //       "full" apart from "empty").
    // name: path name of a POSIX shared-memory object; when NULL (the
    //       default) a heap array is used as the underlying buffer.
    LockFreeQueue(unsigned int size, const char* name = NULL)
    {
        memset(shm_name, 0, sizeof(shm_name));
        createQueue(name, size);
    }
    ~LockFreeQueue()
    {
        if (shm_name[0] == 0)
        {
            // Heap-backed buffer.
            delete [] m_buffer;
            m_buffer = NULL;
        }
        else
        {
            // Shared-memory backed buffer: unmap, then unlink the object.
            // NOTE(review): shm_unlink removes the name system-wide; if
            // several processes share this queue, only the last owner
            // should unlink -- confirm against intended usage.
            if (munmap(m_buffer, m_size * sizeof(T)) == -1) {
                perror("munmap");
            }
            if (shm_unlink(shm_name) == -1) {
                perror("shm_unlink");
            }
        }
    }
    // Full when advancing the tail by one would land on the head.
    bool isFull() const
    {
#ifdef USE_POT
        // BUGFIX: '==' binds tighter than '&'; the original expression
        // "m_head == (m_tail + 1) & (m_size - 1)" actually evaluated
        // "(m_head == m_tail + 1) & (m_size - 1)".
        return m_head == ((m_tail + 1) & (m_size - 1));
#else
        return m_head == (m_tail + 1) % m_size;
#endif
    }
    bool isEmpty() const
    {
        return m_head == m_tail;
    }
    // Current read index (slot of the next element to pop).
    unsigned int front() const
    {
        return m_head;
    }
    // Current write index (slot the next push will fill).
    unsigned int tail() const
    {
        return m_tail;
    }
    // Copy 'value' into the queue. Returns false when the queue is full.
    bool push(const T& value)
    {
        // BUGFIX: the condition was inverted -- the original locked when
        // NOLOCK was true and skipped the lock when locking was requested.
        if (!NOLOCK)
        {
            m_spinLock.spinlock_lock();
        }
        if (isFull())
        {
            if (!NOLOCK)
            {
                m_spinLock.spinlock_unlock();
            }
            return false;
        }
        memcpy(m_buffer + m_tail, &value, sizeof(T));
#ifdef USE_MB
        MEMORY_BARRIER; // publish the payload before advancing the index
#endif
#ifdef USE_POT
        m_tail = (m_tail + 1) & (m_size - 1);
#else
        m_tail = (m_tail + 1) % m_size;
#endif
        if (!NOLOCK)
        {
            m_spinLock.spinlock_unlock();
        }
        return true;
    }
    // Copy the oldest element into 'value'. Returns false when empty.
    bool pop(T& value)
    {
        // BUGFIX: same inverted-NOLOCK condition as push().
        if (!NOLOCK)
        {
            m_spinLock.spinlock_lock();
        }
        if (isEmpty())
        {
            if (!NOLOCK)
            {
                m_spinLock.spinlock_unlock();
            }
            return false;
        }
        memcpy(&value, m_buffer + m_head, sizeof(T));
#ifdef USE_MB
        MEMORY_BARRIER; // read the payload before releasing the slot
#endif
#ifdef USE_POT
        m_head = (m_head + 1) & (m_size - 1);
#else
        m_head = (m_head + 1) % m_size;
#endif
        if (!NOLOCK)
        {
            m_spinLock.spinlock_unlock();
        }
        return true;
    }
protected:
    // Allocate the underlying buffer: a heap array when name == NULL,
    // otherwise a POSIX shared-memory mapping of m_size * sizeof(T) bytes.
    virtual void createQueue(const char* name, unsigned int size)
    {
#ifdef USE_POT
        if (!IS_POT(size))
        {
            size = roundup_pow_of_two(size);
        }
#endif
        m_size = size;
        m_head = m_tail = 0;
        if (name == NULL)
        {
            m_buffer = new T[m_size];
        }
        else
        {
            int shm_fd = shm_open(name, O_CREAT | O_RDWR, 0666);
            if (shm_fd < 0)
            {
                perror("shm_open");
                // BUGFIX: the original reported the error but kept using
                // the invalid fd; fail fast instead.
                exit(1);
            }
            if (ftruncate(shm_fd, m_size * sizeof(T)) < 0)
            {
                perror("ftruncate");
                close(shm_fd);
                exit(1);
            }
            void* addr = mmap(0, m_size * sizeof(T), PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
            if (addr == MAP_FAILED)
            {
                perror("mmap");
                close(shm_fd);
                exit(1);
            }
            if (close(shm_fd) == -1)
            {
                perror("close");
                exit(1);
            }
            m_buffer = static_cast<T*>(addr);
            // BUGFIX: memcpy of SHM_NAME_LEN-1 bytes could read past the
            // end of 'name'; strncpy stops at the terminator (shm_name was
            // already zeroed, so it stays NUL-terminated).
            strncpy(shm_name, name, SHM_NAME_LEN - 1);
        }
        // Always safe to initialize; only locked paths actually use it.
        m_spinLock.spinlock_init();
    }
    // Smallest power of two >= size (returns 0 if that exceeds the range
    // of unsigned int). BUGFIX: the original bit-smearing version shifted
    // a possibly 32-bit size_t by 32, which is undefined behavior.
    inline unsigned int roundup_pow_of_two(size_t size)
    {
        unsigned int v = 1;
        while (v != 0 && v < size)
        {
            v <<= 1;
        }
        return v;
    }
protected:
    char shm_name[SHM_NAME_LEN];   // empty string => heap buffer, else shm path
    volatile unsigned int m_head;  // read index
    volatile unsigned int m_tail;  // write index
    unsigned int m_size;           // ring capacity in slots
    spinlock_t m_spinLock;         // used only when NOLOCK == false
    T* m_buffer;                   // ring storage (heap or shm)
};
// Note: in the single-producer / single-consumer model no lock or atomic
// operation is needed. Is that thread-safe? In theory it is not, but for
// this particular pattern the race never corrupts the result.
//
// Single producer and single consumer, each on its own thread; no lock or
// atomic operation required. The points to watch are the full check and
// the empty check: both read the head and the tail index, so their thread
// safety has to be argued explicitly.
// - Producer (push): the full check reads the head index, which the
//   consumer may be updating at that very moment. The worst a stale read
//   can do is make the queue look full, so push simply returns false --
//   it can never cause an out-of-bounds write.
// - Consumer (pop): the empty check may observe a stale tail index while
//   the producer is advancing it. A stale read can only make the queue
//   look emptier than it is; whenever the check reports "not empty" the
//   data is really there, so the pop consumes valid data.
// Single-producer / single-consumer queue: the producer and the consumer
// each run on their own dedicated thread, so the base queue is
// instantiated with NOLOCK = true and performs no locking at all.
template <class T>
class SingleQueue : public LockFreeQueue<T, true>
{
public:
    SingleQueue(unsigned int size, const char* name = NULL)
        : LockFreeQueue<T, true>(size, name)
    {
    }
};