最近学习了zookeeper分布式锁的相关知识,学完做个总结记录一下:
在3台Linux机器上装好ZooKeeper集群后,就可以用ZooKeeper自带的API进行测试使用了:
首先在项目的pom里面导入zookeeper的jar包如下所示:
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>3.4.9</version>
</dependency>
创建一个分布式锁的工具类如下所示,只包含部分常用的方法,如果要用到其他的方法可以去官网查询然后自己封装:
package tf56.work.utils;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
/**
* Created by Administrator on 2017/8/21.
*/
/**
 * A distributed lock built on ZooKeeper EPHEMERAL_SEQUENTIAL znodes.
 *
 * Each lock() call creates a sequential ephemeral node under /locks; the
 * client holding the node with the smallest sequence number owns the lock,
 * and every other client watches only its immediate predecessor, so a
 * release wakes exactly one waiter (no herd effect).
 *
 * NOTE(review): if a waiting (non-owning) predecessor dies, its successor is
 * woken and assumes ownership without re-reading the full child list —
 * confirm this simplification is acceptable for the intended usage.
 */
public class DistributedLock implements Lock, Watcher {
    private ZooKeeper zk;
    private String root = "/locks";   // parent node for all lock znodes
    private String lockName;          // identifies the contended resource
    private String waitNode;          // immediate predecessor we wait on
    private String myZnode;           // znode created by this instance
    private CountDownLatch latch;     // released when the predecessor is deleted
    private CountDownLatch connectedSignal = new CountDownLatch(1);
    private int sessionTimeout = 30000;

    /**
     * Connects to ZooKeeper and ensures the root node exists.
     *
     * @param config   connect string, e.g. "host1:2181,host2:2181"
     * @param lockName logical name of the resource being locked;
     *                 must not contain "_lock_"
     * @throws LockException if connecting or creating the root fails
     */
    public DistributedLock(String config, String lockName) {
        this.lockName = lockName;
        try {
            // Open the connection; this instance is the Watcher, and
            // process() releases connectedSignal once the session is up.
            zk = new ZooKeeper(config, sessionTimeout, this);
            connectedSignal.await();
            Stat stat = zk.exists(root, false); // no watch registered here
            if (stat == null) {
                // First user creates the persistent root node.
                zk.create(root, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        } catch (IOException e) {
            throw new LockException(e);
        } catch (KeeperException e) {
            throw new LockException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
            throw new LockException(e);
        }
    }

    /**
     * Watcher callback for both session events and znode events.
     */
    @Override
    public void process(WatchedEvent event) {
        // Session (connection) events carry EventType.None.
        if (event.getType() == Event.EventType.None) {
            if (event.getState() == Event.KeeperState.SyncConnected) {
                connectedSignal.countDown();
            }
            return;
        }
        // FIX: the original branched only on KeeperState.SyncConnected, but
        // node events on a live session also report SyncConnected, so the
        // latch countdown below was unreachable and waiters only woke after
        // the full await timeout. Only a deletion of the watched predecessor
        // means the lock may now be ours.
        if (event.getType() == Event.EventType.NodeDeleted && this.latch != null) {
            this.latch.countDown();
        }
    }

    /**
     * Blocks until the lock is acquired.
     */
    public void lock() {
        try {
            if (this.tryLock()) {
                System.out.println("Thread " + Thread.currentThread().getId() + " " + myZnode + " get lock true");
                return;
            }
            // Not the smallest node: wait for our predecessor to go away.
            // Re-arm the wait if it merely timed out while the predecessor
            // still exists (original ignored the timeout and claimed the lock).
            while (!waitForLock(waitNode, sessionTimeout)) {
                // keep waiting
            }
            System.out.println("Thread " + Thread.currentThread().getId() + " " + myZnode + " get the wait lock true");
        } catch (KeeperException e) {
            throw new LockException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
            throw new LockException(e);
        }
    }

    /**
     * Tries to take the lock without blocking.
     *
     * @return true if this client created the smallest sequence node
     */
    public boolean tryLock() {
        try {
            String splitStr = "_lock_";
            if (lockName.contains(splitStr)) {
                // FIX: the original message referred to "\u000B" (a leftover
                // from an older separator); report the actual forbidden token.
                throw new LockException("lockName can not contain \"" + splitStr + "\"");
            }
            // Create our ephemeral sequential node; it disappears with the session.
            myZnode = zk.create(root + "/" + lockName + splitStr, new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
            System.out.println(myZnode + " is created ");
            // Collect only the children competing for the same lockName.
            List<String> subNodes = zk.getChildren(root, false);
            List<String> lockObjNodes = new ArrayList<String>();
            for (String node : subNodes) {
                String _node = node.split(splitStr)[0];
                if (_node.equals(lockName)) {
                    lockObjNodes.add(node);
                }
            }
            Collections.sort(lockObjNodes);
            if (myZnode.equals(root + "/" + lockObjNodes.get(0))) {
                // Smallest sequence number -> we own the lock.
                System.out.println(myZnode + "==" + lockObjNodes.get(0));
                return true;
            }
            // Otherwise remember the node immediately ahead of us.
            String subMyZnode = myZnode.substring(myZnode.lastIndexOf("/") + 1);
            waitNode = lockObjNodes.get(Collections.binarySearch(lockObjNodes, subMyZnode) - 1);
        } catch (KeeperException e) {
            throw new LockException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
            throw new LockException(e);
        }
        return false;
    }

    /**
     * Tries to take the lock, waiting up to the given time.
     *
     * @return true if the lock was acquired within the timeout
     */
    public boolean tryLock(long time, TimeUnit unit) {
        try {
            if (this.tryLock()) {
                return true;
            }
            // FIX: the original passed `time` straight through even though
            // waitForLock counts milliseconds; honor the caller's unit.
            return waitForLock(waitNode, unit.toMillis(time));
        } catch (Exception e) {
            e.printStackTrace();
        }
        return false;
    }

    /**
     * Waits until the given predecessor node disappears or waitTime (ms)
     * elapses.
     *
     * @return true if the predecessor is gone, false on timeout
     */
    private boolean waitForLock(String lower, long waitTime) throws InterruptedException, KeeperException {
        // exists() with watch=true also registers a watch that fires on deletion.
        Stat stat = zk.exists(root + "/" + lower, true);
        if (stat == null) {
            return true; // predecessor already gone, no need to wait
        }
        System.out.println("Thread " + Thread.currentThread().getId() + " waiting for " + root + "/" + lower);
        this.latch = new CountDownLatch(1);
        // FIX: the original discarded this result and always returned true,
        // making a timeout indistinguishable from a real release.
        boolean released = this.latch.await(waitTime, TimeUnit.MILLISECONDS);
        this.latch = null;
        return released;
    }

    /**
     * Deletes our znode and closes the session. Safe to call even when the
     * lock was never acquired.
     */
    public void unlock() {
        try {
            // FIX: guard against zk.delete(null, -1) when lock() failed.
            if (myZnode != null) {
                System.out.println("unlock " + myZnode);
                zk.delete(myZnode, -1);
                myZnode = null;
            }
            zk.close();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
            e.printStackTrace();
        } catch (KeeperException e) {
            e.printStackTrace();
        }
    }

    /** Delegates to lock(); interruption surfaces as a LockException. */
    public void lockInterruptibly() throws InterruptedException {
        this.lock();
    }

    /** Conditions are not supported; callers receive null, as before. */
    public Condition newCondition() {
        return null;
    }

    /** Unchecked wrapper for ZooKeeper/connection failures. */
    public class LockException extends RuntimeException {
        private static final long serialVersionUID = 1L;

        public LockException(String e) {
            super(e);
        }

        public LockException(Exception e) {
            super(e);
        }
    }
}
测试类如下所示:
package test_Controller;
import tf56.work.utils.DistributedLock;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* Created by Administrator on 2017/8/21.
*/
/**
 * Exercises DistributedLock: ten threads contend for the same lock and each
 * holds it for one second, so the console shows the queueing behavior.
 */
public class DistributedLockTest {
    public static String hosts = "10.7.28.179:2181,10.7.28.180:2181,10.7.28.181:2181";

    public static void main(String[] args) {
        ExecutorService executor = Executors.newCachedThreadPool();
        final int count = 10;
        final CountDownLatch latch = new CountDownLatch(count);
        for (int i = 0; i < count; i++) {
            // One ZooKeeper session per contender.
            final DistributedLock node = new DistributedLock(hosts, "lock");
            executor.submit(new Runnable() {
                public void run() {
                    boolean locked = false;
                    try {
                        node.lock(); // blocks until the lock is acquired
                        locked = true;
                        // Hold the lock for one second so more threads pile up
                        // behind us and contention is visible.
                        Thread.sleep(1000);
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        latch.countDown();
                        // FIX: only release when lock() actually succeeded;
                        // the original always called unlock(), which tries to
                        // delete a null znode after a lock failure.
                        if (locked) {
                            node.unlock();
                        }
                    }
                }
            });
        }
        try {
            latch.await(); // wait for every worker to finish
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
            e.printStackTrace();
        }
        executor.shutdown();
    }
}
测试结果如下所示:
/locks/lock_lock_0000000144 is created
/locks/lock_lock_0000000145 is created
/locks/lock_lock_0000000144==lock_lock_0000000144
Thread 13 /locks/lock_lock_0000000144 get lock true
Thread 16 waiting for /locks/lock_lock_0000000144
/locks/lock_lock_0000000146 is created
Thread 19 waiting for /locks/lock_lock_0000000145
/locks/lock_lock_0000000147 is created
Thread 22 waiting for /locks/lock_lock_0000000146
/locks/lock_lock_0000000148 is created
Thread 25 waiting for /locks/lock_lock_0000000147
/locks/lock_lock_0000000149 is created
Thread 28 waiting for /locks/lock_lock_0000000148
/locks/lock_lock_0000000150 is created
Thread 31 waiting for /locks/lock_lock_0000000149
/locks/lock_lock_0000000151 is created
Thread 34 waiting for /locks/lock_lock_0000000150
/locks/lock_lock_0000000152 is created
Thread 37 waiting for /locks/lock_lock_0000000151
/locks/lock_lock_0000000153 is created
Thread 40 waiting for /locks/lock_lock_0000000152
/locks/lock_lock_0000000154 is created
Thread 43 waiting for /locks/lock_lock_0000000153
/locks/lock_lock_0000000155 is created
Thread 46 waiting for /locks/lock_lock_0000000154
/locks/lock_lock_0000000156 is created
Thread 49 waiting for /locks/lock_lock_0000000155
/locks/lock_lock_0000000157 is created
Thread 52 waiting for /locks/lock_lock_0000000156
/locks/lock_lock_0000000158 is created
Thread 55 waiting for /locks/lock_lock_0000000157
/locks/lock_lock_0000000159 is created
Thread 58 waiting for /locks/lock_lock_0000000158
/locks/lock_lock_0000000160 is created
Thread 61 waiting for /locks/lock_lock_0000000159
/locks/lock_lock_0000000161 is created
Thread 64 waiting for /locks/lock_lock_0000000160
/locks/lock_lock_0000000162 is created
Thread 67 waiting for /locks/lock_lock_0000000161
/locks/lock_lock_0000000163 is created
Thread 70 waiting for /locks/lock_lock_0000000162
/locks/lock_lock_0000000164 is created
Thread 73 waiting for /locks/lock_lock_0000000163