zookeeper
1.1模型结构
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-XSaMqVng-1620627676303)(D:\data\note\image\image-20210508121407074.png)]
1.2 Linux安装
安装jdk
wget https://mirrors.bfsu.edu.cn/apache/zookeeper/zookeeper-3.6.3/apache-zookeeper-3.6.3-bin.tar.gz
tar -zxvf apache-zookeeper-3.6.3-bin.tar.gz -C /usr/local/
mv /usr/local/apache-zookeeper-3.6.3-bin /usr/local/zookeeper-3.6.3
1.3修改配置文件
cd /usr/local/zookeeper-3.6.3/conf
#修改配置名 才能识别
[root@server-200 conf]# mv zoo_sample.cfg zoo.cfg
#查看配置文件
[root@server-200 conf]# mkdir /tmp/zookeeper
[root@server-200 conf]# vim zoo.cfg
# The number of milliseconds of each tick
tickTime=2000 #####集群节点之间心跳时间2秒 毫秒单位
# The number of ticks that the initial
# synchronization phase can take
initLimit=10 #####初始化集群时集群节点同步超时时间20s
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5 ####集群在运行过程中同步数据超时时间10s
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper #############数据存放目录
# the port at which the clients will connect
clientPort=2181 #######端口号
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60 ##### 线程池线程数量最大并发连接60
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
## Metrics Providers
#
# https://prometheus.io Metrics Exporter
#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
#metricsProvider.httpPort=7000
#metricsProvider.exportJvmInfo=true
1.4 启动zookeeper
[root@server-200 zookeeper-3.6.3]# ./bin/zkServer.sh start /usr/local/zookeeper-3.6.3/conf/zoo.cfg
#查看进程
[root@server-200 zookeeper-3.6.3]# jps
2827 QuorumPeerMain #zookeeper进程
2844 Jps
[root@server-200 zookeeper-3.6.3]#
1.5 连接zookeeper
#连接远程
[root@server-200 zookeeper-3.6.3]# ./bin/zkCli.sh -server 192.168.1.200:2181
#连接本地可以什么参数都不用写
[root@server-200 zookeeper-3.6.3]# ./bin/zkCli.sh
1.6 基本语法
[zk: 192.168.1.200:2181(CONNECTED) 1] ls / #查看根节点
[zookeeper] ###默认存在的节点
[zk: 192.168.1.200:2181(CONNECTED) 2]
[zk: localhost:2181(CONNECTED) 0] ls /zookeeper
[config, quota]
1. ls path #查看指定节点下面的子节点
2. create path data #创建一个节点,并给节点绑定数据(默认是持久性节点)
-create path data 创建持久节点(默认是持久节点)
-create -s path data 创建持久顺序节点
-create -e path data 创建临时节点(注意:临时节点不能含有任何子节点)
-create -e -s path data 创建临时顺序节点(注意:临时节点不能含有任何子节点)
3. stat path 查看节点状态
4.set path data 修改节点数据
5.ls2 path 查看节点下孩子和当前节点的状态(注意:3.6 版本中 ls2 已移除,使用 ls -s path 代替)
6.history 查看操作历史
7.get path 获得节点上绑定的数据信息
8.delete path 删除节点(注意:删除节点不能含有子节点)
9.rmr path 递归删除节点(注意:会将当前节点下所有节点删除;3.6 版本中 rmr 已弃用,推荐使用 deleteall path)
10.quit 退出当前会话(会话失效)
1.7 watch节点监听机制
一个是节点目录监听
一个是数据监听
客户端可以监测znode节点的变化。znode节点的变化触发相应的事件,然后清除对该节点的监测。当监测一个znode节点时候,zookeeper会发送通知给监测节点。一个watch事件是一个一次性的触发器,当被设置了watch的数据和目录发生了改变的时候,则服务器将这个改变发送给设置了watch的客户端以便通知它们。
#1. ls -w /path 监听节点目录的变化(3.6 版本中旧写法 ls /path true 已被弃用)
#2. get -w /path 监听节点数据的变化(3.6 版本中旧写法 get /path true 已被弃用)
1.8 Java连接zookeeper
#引入zookeeper客户端依赖
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.example</groupId>
<artifactId>zookeeper</artifactId>
<version>1.0-SNAPSHOT</version>
<dependencies>
<!-- 引入zookeeper依赖-->
<!-- https://mvnrepository.com/artifact/org.apache.zookeeper/zookeeper -->
<!-- https://mvnrepository.com/artifact/com.101tec/zkclient -->
<dependency>
<groupId>com.101tec</groupId>
<artifactId>zkclient</artifactId>
<version>0.11</version>
</dependency>
</dependencies>
</project>
package cc.test.com;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.serialize.SerializableSerializer;
import org.apache.zookeeper.CreateMode;
import java.util.List;
/**
 * Demonstrates basic znode CRUD operations against a ZooKeeper server
 * using the com.101tec zkclient library.
 */
public class TestZkclient {
    public static void main(String[] args) {
        // Arg 1: zk server address "host:port"
        // Arg 2: session timeout (ms)
        // Arg 3: connection timeout (ms)
        // Arg 4: serializer used for znode data
        ZkClient zkClient = new ZkClient("192.168.1.200:2181", 60000 * 30, 60000, new SerializableSerializer());
        System.out.println(zkClient);
        try {
            // Create a persistent node.
            zkClient.create("/del", "zhangsan", CreateMode.PERSISTENT);
            // ZkClient.create() does NOT create missing parent znodes, so the
            // child creations below would throw ZkNoNodeException on a fresh
            // server. createPersistent(path, true) creates the whole chain and
            // is a no-op for paths that already exist.
            zkClient.createPersistent("/node", true);
            zkClient.createPersistent("/name", true);
            // Create a persistent sequential node.
            zkClient.create("/node/name", "dema", CreateMode.PERSISTENT_SEQUENTIAL);
            // Create an ephemeral node (note: ephemeral nodes cannot have children).
            zkClient.create("/name/lists", "yasuo", CreateMode.EPHEMERAL);
            // Create an ephemeral sequential node.
            zkClient.create("/node/zhangsan", "data", CreateMode.EPHEMERAL_SEQUENTIAL);
            // Delete a single node (fails if it still has children).
            boolean delete = zkClient.delete("/del");
            // Check whether the delete succeeded.
            System.out.println(delete);
            // Recursively delete a node together with all of its descendants.
            boolean b = zkClient.deleteRecursive("/node");
            // Check whether the recursive delete succeeded.
            System.out.println(b);
            // List the children of the root node.
            List<String> children = zkClient.getChildren("/");
            for (String child : children) {
                System.out.println(child);
            }
        } finally {
            // Always release the client connection, even if an operation throws.
            zkClient.close();
        }
    }
}
1.9 集群
1.9.1 在data下创建myid
[root@server-200 zookeeper-3.6.3]# cd /usr/local/zookeeper-3.6.3/
[root@server-200 zookeeper-3.6.3]# mkdir data1 data2 data3
[root@server-200 zookeeper-3.6.3]# touch data1/myid data2/myid data3/myid
#创建不同的标识
[root@server-200 zookeeper-3.6.3]# echo 1 > data1/myid
[root@server-200 zookeeper-3.6.3]# cat data1/myid
1
[root@server-200 zookeeper-3.6.3]# echo 2 > data2/myid
[root@server-200 zookeeper-3.6.3]# echo 3 > data3/myid
1.9.2 修改zoo.cfg文件
[root@server-200 conf]# cp zoo.cfg zoo1.cfg
[root@server-200 conf]# cp zoo.cfg zoo2.cfg
[root@server-200 conf]# cp zoo.cfg zoo3.cfg
[root@server-200 conf]# vim zoo1.cfg
[root@server-200 conf]# vim zoo2.cfg
[root@server-200 conf]# vim zoo3.cfg
#zoo1.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.6.3/data1
clientPort=2181
server.1=192.168.1.200:4003:3003
server.2=192.168.1.200:4004:3004
server.3=192.168.1.200:4005:3005
#zoo2.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.6.3/data2
clientPort=2182
server.1=192.168.1.200:4003:3003
server.2=192.168.1.200:4004:3004
server.3=192.168.1.200:4005:3005
#zoo3.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.6.3/data3
clientPort=2183
server.1=192.168.1.200:4003:3003
server.2=192.168.1.200:4004:3004
server.3=192.168.1.200:4005:3005
1.9.3 启动集群
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh start /usr/local/zookeeper-3.6.3/conf/zoo1.cfg
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh start /usr/local/zookeeper-3.6.3/conf/zoo2.cfg
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh start /usr/local/zookeeper-3.6.3/conf/zoo3.cfg
[root@server-200 conf]# jps
5905 QuorumPeerMain
6452 Jps
5980 QuorumPeerMain
6060 QuorumPeerMain
1.9.4 查看集群状态
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh status /usr/local/zookeeper-3.6.3/conf/zoo1.cfg
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.6.3/conf/zoo1.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Error contacting service. It is probably not running.
[root@server-200 conf]# jps
5905 QuorumPeerMain
7224 Jps
5980 QuorumPeerMain
6060 QuorumPeerMain
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh status /usr/local/zookeeper-3.6.3/conf/zoo2.cfg
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.6.3/conf/zoo2.cfg
Client port found: 2182. Client address: localhost. Client SSL: false.
Error contacting service. It is probably not running.
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh status /usr/local/zookeeper-3.6.3/conf/zoo3.cfg
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.6.3/conf/zoo3.cfg
Client port found: 2183. Client address: localhost. Client SSL: false.
Error contacting service. It is probably not running.
[root@server-200 conf]# /usr/local/zookeeper-3.6.3/bin/zkServer.sh status /usr/local/zookeeper-3.6.3/conf/zoo3.cfg
ZooKeeper JMX enabled by default