分布式服务框架 Zookeeper -- 管理分布式环境中的数据
基于 ZooKeeper 的分布式锁的实现,存在羊群效应
// Distributed lock built on a single, fixed znode (/mylock/test1).
// Every waiter watches the SAME node, so each release wakes ALL waiters at
// once — this class deliberately demonstrates the "herd effect".
public class SDistributeLock {
// NOTE(review): never assigned in this class — appears to be dead state.
private String lockZnode = null;
// Root namespace znode under which the lock node lives.
private String lockNameSpace = "/mylock";
// The single fixed lock znode every thread competes to create.
private String nodeString = lockNameSpace + "/test1";
// One shared ZooKeeper session for the whole JVM, opened in the static block.
private static ZooKeeper zk;
private static String zooKeeperUrl="localhost:2181,localhost:2182,localhost:2183";
static {
// Block until the session is connected; ConnectWatcher is expected to count
// the latch down on SyncConnected (defined elsewhere — TODO confirm).
CountDownLatch connectorSemaphore = new CountDownLatch(1);
String url = zooKeeperUrl;
int timeout = 60000;
try {
zk = new ZooKeeper(url, timeout, new ConnectWatcher(connectorSemaphore));
System.out.println("++++++++++++++++++"+zk.getState());
connectorSemaphore.await();
System.out.println("++++++++++++++++++"+zk.getState());
} catch (IOException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
// Create the persistent root namespace znode if it does not exist yet.
private void ensureRootPath() throws InterruptedException {
try {
if (zk.exists(lockNameSpace,true)==null){
zk.create(lockNameSpace,"".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
}
} catch (KeeperException e) {
e.printStackTrace();
}
}
// Arm a delete-watch on the shared lock node. When the node is deleted the
// watcher interrupts the given thread (which is sleeping inside lock()).
// ZooKeeper watches are one-shot, so each watcher re-registers a fresh one;
// the nested anonymous Watchers below are those successive re-registrations.
private void watchNode(final String nodeString, final Thread thread) throws InterruptedException {
try {
zk.exists(nodeString, new Watcher() {
@Override
public void process(WatchedEvent watchedEvent) {
System.out.println("out "+thread.getName()+"==" + watchedEvent.toString());
if(watchedEvent.getType() == Event.EventType.NodeDeleted){
System.out.println("外部 "+thread.getName()+"==" + "进入中断");
// Cut the waiter's Thread.sleep(5000) short.
thread.interrupt();
}
try {
// Re-arm the one-shot watch with a second-level watcher.
zk.exists(nodeString,new Watcher() {
@Override
public void process(WatchedEvent watchedEvent) {
System.out.println("in "+thread.getName()+ "==" + watchedEvent.toString());
if(watchedEvent.getType() == Event.EventType.NodeDeleted){
System.out.println("内部 "+thread.getName()+"==" + "进入中断");
thread.interrupt();
}
try {
// Third re-arm falls back to the session's default watcher (true).
zk.exists(nodeString,true);
} catch (KeeperException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
} catch (KeeperException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
} catch (KeeperException e) {
e.printStackTrace();
}
}
/**
 * Acquire the lock by trying to create the shared EPHEMERAL znode.
 * On failure (node exists) the thread backs off 5 s; the delete-watch
 * interrupt ends that sleep early so the thread retries immediately.
 * @return true once the znode was created, i.e. the lock is held
 * @throws InterruptedException if interrupted outside the back-off sleep
 */
public boolean lock() throws InterruptedException {
String path = null;
ensureRootPath();
// Watch the shared node BEFORE competing. Every waiter watches the same
// node, so one delete wakes them all — the herd effect this class shows.
watchNode(nodeString,Thread.currentThread());
while (true) {
try {
// EPHEMERAL: the lock auto-releases if this session dies.
path = zk.create(nodeString, "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch (KeeperException e) {
// Somebody else holds the lock (node already exists); back off.
System.out.println(Thread.currentThread().getName() + " getting Lock but can not get");
try {
Thread.sleep(5000);
}catch (InterruptedException ex){
System.out.println("thread is notify");
}
}
if (!Strings.nullToEmpty(path).trim().isEmpty()) {
System.out.println(Thread.currentThread().getName() + " get Lock...");
return true;
}
}
}
/**
 * Release the lock by deleting the shared znode (version -1 = any version).
 */
public void unlock(){
try {
zk.delete(nodeString,-1);
System.out.println(Thread.currentThread().getName() + " release Lock...");
} catch (InterruptedException e) {
e.printStackTrace();
} catch (KeeperException e) {
e.printStackTrace();
}
}
}
测试类
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class LockTest {
    /**
     * Spins up {@code count} tasks that all contend for the same distributed
     * lock, then waits for every task to lock AND unlock before shutting the
     * pool down.
     */
    @Test
    public void testDistributeLock(){
        ExecutorService executor = Executors.newCachedThreadPool();
        final int count = 1000;
        // Lets the main thread block until every worker has fully finished.
        final CountDownLatch latch = new CountDownLatch(count);
        for (int i = 0; i < count; i++) {
            final DistributeLock node = new DistributeLock();
            executor.submit(new Runnable() {
                public void run() {
                    try {
                        // Sleep 1000 ms (~1 s) so many workers are started and
                        // contend for the lock at the same time. Without the
                        // latch the main thread would reach shutdown before
                        // any worker even acquired the lock.
                        Thread.sleep(1000);
                        node.lock(); // blocks until the lock is acquired
                        Thread.sleep(100); // hold the lock briefly
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        // Release the lock BEFORE counting down, so the latch
                        // only reaches zero once every lock has actually been
                        // released (the original counted down first, letting
                        // the main thread proceed while unlocks were pending).
                        node.unlock();
                        latch.countDown();
                    }
                }
            });
        }
        try {
            // Blocks the current thread until the counter reaches zero.
            latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        executor.shutdown();
    }
}
执行结果
pool-3-thread-5 get Lock...
pool-3-thread-2 getting Lock but can not get
pool-3-thread-3 getting Lock but can not get
pool-3-thread-1 getting Lock but can not get
pool-3-thread-4 getting Lock but can not get
out pool-3-thread-2==WatchedEvent state:SyncConnected type:NodeCreated path:/mylock/test1
out pool-3-thread-1==WatchedEvent state:SyncConnected type:NodeCreated path:/mylock/test1
out pool-3-thread-5==WatchedEvent state:SyncConnected type:NodeCreated path:/mylock/test1
in pool-3-thread-2==WatchedEvent state:SyncConnected type:NodeDeleted path:/mylock/test1
内部 pool-3-thread-2==进入中断
thread is notify
pool-3-thread-5 release Lock...
in pool-3-thread-1==WatchedEvent state:SyncConnected type:NodeDeleted path:/mylock/test1
内部 pool-3-thread-1==进入中断
thread is notify
pool-3-thread-2 get Lock...
out pool-3-thread-4==WatchedEvent state:SyncConnected type:NodeDeleted path:/mylock/test1
外部 pool-3-thread-4==进入中断
thread is notify
pool-3-thread-1 getting Lock but can not get
in pool-3-thread-3==WatchedEvent state:SyncConnected type:NodeDeleted path:/mylock/test1
内部 pool-3-thread-3==进入中断
thread is notify
pool-3-thread-4 getting Lock but can not get
in pool-3-thread-5==WatchedEvent state:SyncConnected type:NodeDeleted path:/mylock/test1
内部 pool-3-thread-5==进入中断
pool-3-thread-3 getting Lock but can not get
in pool-3-thread-4==WatchedEvent state:SyncConnected type:NodeDeleted path:/mylock/test1
内部 pool-3-thread-4==进入中断
pool-3-thread-2 release Lock...
thread is notify
pool-3-thread-4 get Lock...
pool-3-thread-4 release Lock...
pool-3-thread-1 get Lock...
pool-3-thread-3 getting Lock but can not get
pool-3-thread-1 release Lock...
pool-3-thread-3 get Lock...
解决羊群效应(每个等待者只监听比自己序号小的前一个节点,释放锁时只唤醒一个等待者):
public class DistributeLock {
private String lockZnode = null;
private static ZooKeeper zk;
private static String zooKeeperUrl="localhost:2181,localhost:2182,localhost:2183";
static {
CountDownLatch connectorSemaphore = new CountDownLatch(1);
String url = zooKeeperUrl;
int timeout = 60000;
try {
zk = new ZooKeeper(url, timeout, new ConnectWatcher(connectorSemaphore));
System.out.println("++++++++++++++++++"+zk.getState());
connectorSemaphore.await();
System.out.println("++++++++++++++++++"+zk.getState());
} catch (IOException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* 获取锁
* @return
* @throws InterruptedException
*/
public void lock(){
try {
Stat stat = zk.exists("/locknode", false);//此去不执行 Watcher
if(stat == null){
//创建根节点,永久存在
zk.create("/locknode", "lock".getBytes(),
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
}
String path = zk.create("/locknode/guid-lock","lock".getBytes(),
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
lockZnode=path;
List<String> children = zk.getChildren("/locknode", true);
Collections.sort(children);
if (!Strings.nullToEmpty(path).trim().isEmpty()
&&!Strings.nullToEmpty(children.get(0)).trim().isEmpty()
&&path.equals("/locknode/"+children.get(0))) {
System.out.println(Thread.currentThread().getName() + " get Lock...");
return;
}
String watchNode = null;
for (int i=children.size()-1;i>=0;i--){
if(children.get(i).compareTo(path.substring(path.lastIndexOf("/") + 1))<0){
watchNode = children.get(i);
break;
}
}
if (watchNode!=null){
final String watchNodeTmp = watchNode;
//给当前线程的创建的znode小的znode添加监听,当发生删除事件的时候只叫醒当前的这个线程
final Thread thread = Thread.currentThread();
Stat stat1 = zk.exists("/locknode/" + watchNodeTmp,new Watcher() {
@Override
public void process(WatchedEvent watchedEvent) {
if(watchedEvent.getType() == Watcher.Event.EventType.NodeDeleted){
thread.interrupt();
}
try {
zk.exists("/locknode/" + watchNodeTmp,true);
} catch (KeeperException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
if(stat1 != null){
System.out.println("Thread " + Thread.currentThread().getId() + " waiting for " + "/locknode/" + watchNode);
}
}
try {
//等待直到被唤醒
Thread.sleep(1000000000);
}catch (InterruptedException ex){
System.out.println(Thread.currentThread().getName() + " notify");
System.out.println(Thread.currentThread().getName() + " get Lock...");
return;
}
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* 释放锁
*/
public void unlock(){
try {
System.out.println(Thread.currentThread().getName() + "release Lock...");
zk.delete(lockZnode,-1);
} catch (InterruptedException e) {
e.printStackTrace();
} catch (KeeperException e) {
e.printStackTrace();
}
}
}
但是利用zookeeper1的端口去get znode的时候,不管这个znode是在哪个zookeeper服务器下创建的都可以获取到,说明整个zookeeper集群维护的是同一个文件结构
另外,EPHEMERAL(临时)类型的节点与创建它的客户端会话绑定:会话结束后节点会被自动删除,因此请求的服务关闭后再用命令获取该节点就获取不到了;如果希望节点在会话结束后仍然保留,创建时需要使用 CreateMode.PERSISTENT_SEQUENTIAL 等持久化类型。
将APP1的所有配置配置到/APP1 znode下,APP1所有机器一启动就对/APP1这个节点进行监控(zk.exist("/APP1",true)),并且实现回调方法Watcher,那么在zookeeper上/APP1 znode节点下数据发生变化的时候,每个机器都会收到通知,Watcher方法将会被执行,那么应用再取下数据即可(zk.getData("/APP1",false,null));
一个leader,多个follower
每个server保存一份数据副本
全局数据一致
分布式读写
ZooKeeper拥有一个层次的命名空间,这个和分布式的文件系统非常相似。不同的是ZooKeeper命名空间中的Znode,兼具文件和目录两种特点。既像文件一样维护着数据、元信息、ACL、时间戳等数据结构,又像目录一样可以作为路径标识的一部分,并可以具有子znode。用户对znode具有增、删、改、查等操作
ZooKeeper目录树中每一个节点对应一个Znode。每个Znode维护着一个属性结构,它包含着版本号(dataVersion),时间戳(ctime,mtime)等状态信息。ZooKeeper正是使用节点的这些特性来实现它的某些特定功能。每当Znode的数据改变时,他相应的版本号将会增加。每当客户端检索数据时,它将同时检索数据的版本号。并且如果一个客户端执行了某个节点的更新或删除操作,他也必须提供要被操作的数据版本号。如果所提供的数据版本号与实际不匹配,那么这个操作将会失败。
ZooKeeper允许各分布式进程通过一个共享的命名空间相互联系,该命名空间类似于一个标准的层次型的文件系统:由若干注册了的数据节点构成(用Zookeeper的术语叫znode),这些节点类似于文件和目录。
客户端可以在节点上设置watch(我们称之为监视器)。当节点状态发生改变时(数据的增、删、改)将会触发watch所对应的操作。当watch被触发时,ZooKeeper将会向客户端发送且仅发送一条通知,因为watch只能被触发一次。
ZK集群搭建
利用zookeeper部署了一个最小的虚拟集群,有3个zookeeper服务器,三个服务器使用的是同一个IP但是不同的端口,写了一个服务多次请求这个集群可以清楚的看到,多次请求分别到了不同的zookeeper服务器上。
但是利用zookeeper1的端口去get znode的时候,不管这个znode是在哪个zookeeper服务器下创建的都可以获取到,说明整个zookeeper集群维护的是同一个文件结构,每个zk节点的数据都是一致的
另外,EPHEMERAL(临时)类型的节点与创建它的客户端会话绑定:会话结束后节点会被自动删除,因此请求的服务关闭后再用命令获取该节点就获取不到了;如果希望节点在会话结束后仍然保留,创建时需要使用 CreateMode.PERSISTENT_SEQUENTIAL 等持久化类型。