zookeeper实现负载均衡其实原理很简单,zookeeper 的数据存储类似于Linux的目录结构。首先建立servers节点,并建立监听器监视servers子节点的状态(用于在服务器增减时及时同步当前集群中服务器列表)。在每个服务器启动时,在servers节点下建立子节点worker server(可以用服务器地址命名),并在对应的子节点下存入服务器的相关信息。这样,我们在zookeeper服务器上可以获取当前集群中的服务器列表及相关信息,可以自定义一个负载均衡算法,在每个请求过来时从zookeeper服务器中获取当前集群服务器列表,根据算法选出其中一个服务器来处理请求。
简单示例:
package xyx.tuny.balance;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;
/**
 * 服务提供方 A:启动时把自身注册到 ZooKeeper 的 /servers/&lt;serviceName&gt;/&lt;ip&gt; 节点下,
 * 供消费端做负载均衡时发现。
 */
public class ServiceAProvider {
    // 同一服务的多台服务器都注册在同一个 serviceName 节点下
    private String serviceName="service-A";

    /**
     * 连接 ZooKeeper 并注册本机节点。
     * 注意:服务器节点必须用 EPHEMERAL(临时)模式创建——会话断开(服务器宕机)时
     * 节点会被 ZooKeeper 自动删除,消费端的子节点监听器才能及时把故障机器从列表中剔除;
     * 原实现用 PERSISTENT 会导致宕机的服务器永远留在注册表里继续接收请求。
     */
    public void init(){
        String rootPath="/servers";
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("127.0.0.1:2181")
                .sessionTimeoutMs(5000)
                .retryPolicy(new ExponentialBackoffRetry(1000, 3))
                .build();
        client.start();
        String ip="192.168.1.23";
        String nodePath = rootPath+"/"+serviceName+"/"+ip;
        try {
            // creatingParentsIfNeeded 一次性补齐 /servers/service-A 父路径(父节点为持久节点),
            // 替代原来对每一级路径逐级判空再创建的三次往返
            if(null == client.checkExists().forPath(nodePath)){
                client.create()
                      .creatingParentsIfNeeded()
                      .withMode(CreateMode.EPHEMERAL) // 宕机/断连时自动下线
                      .forPath(nodePath);
            }
            // 成功提示只在真正创建成功后输出(原实现失败时也会打印"创建完成",具有误导性)
            System.out.println("znode:"+rootPath+"/"+serviceName+"/"+ip+"创建完成");
        } catch (Exception e) {
            // 演示代码仅打印;生产环境应重试注册或直接让进程启动失败
            e.printStackTrace();
        }
    }

    //提供服务
    public void provide(){
    }

    public static void main(String[]args) throws Exception {
        ServiceAProvider service = new ServiceAProvider();
        service.init();
        // 保持进程存活以维持 ZooKeeper 会话(临时节点依赖会话存活)
        Thread.sleep(1000*60*60*24);
    }
}
package xyx.tuny.balance;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;
/**
 * 服务提供方 B:与 ServiceAProvider 同理,注册到同一个 service-A 节点下
 * (两台服务器提供同一个服务,消费端在两者之间做负载均衡)。
 */
public class ServiceBProvider {
    // 刻意与 ServiceAProvider 相同:同一服务的不同实例
    private String serviceName="service-A";

    /**
     * 连接 ZooKeeper 并注册本机节点。
     * 服务器节点用 EPHEMERAL(临时)模式创建:会话断开时节点自动删除,
     * 消费端监听器据此把故障机器从负载均衡列表中剔除;
     * 原实现的 PERSISTENT 模式会让宕机的服务器永远留在注册表中。
     */
    public void init(){
        String rootPath="/servers";
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("127.0.0.1:2181")
                .sessionTimeoutMs(5000)
                .retryPolicy(new ExponentialBackoffRetry(1000, 3))
                .build();
        client.start();
        String ip="192.168.1.24";
        String nodePath = rootPath+"/"+serviceName+"/"+ip;
        try {
            // creatingParentsIfNeeded 一次性补齐父路径,替代逐级判空创建
            if(null == client.checkExists().forPath(nodePath)){
                client.create()
                      .creatingParentsIfNeeded()
                      .withMode(CreateMode.EPHEMERAL) // 宕机/断连时自动下线
                      .forPath(nodePath);
            }
            // 成功提示只在真正创建成功后输出(原实现失败时也会打印)
            System.out.println("znode:"+rootPath+"/"+serviceName+"/"+ip+"创建完成");
        } catch (Exception e) {
            // 演示代码仅打印;生产环境应重试注册或直接让进程启动失败
            e.printStackTrace();
        }
    }

    //提供服务
    public void provide(){
    }

    public static void main(String[]args) throws Exception {
        ServiceBProvider service = new ServiceBProvider();
        service.init();
        // 保持进程存活以维持 ZooKeeper 会话(临时节点依赖会话存活)
        Thread.sleep(1000*60*60*24);
    }
}
package xyx.tuny.balance;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
/**
 * 服务消费方:监听 /servers/service-A 下的子节点变化,维护当前可用服务器列表,
 * 每次调用时按随机算法从列表中选出一台服务器(最简单的负载均衡策略)。
 */
public class ServiceConsumer {
    // 当前可用服务器列表(由监听器回调更新)
    private List<String> serverList=new ArrayList<String>();
    private String serviceName="service-A";

    /**
     * 连接 ZooKeeper,注册子节点监听器并做一次初始拉取。
     * 修正点:监听器必须在 childCache.start(...) 之前注册,
     * 否则 POST_INITIALIZED_EVENT 等启动期事件可能在监听器挂上之前就已发出而被丢失。
     */
    public void init(){
        final String servicePath="/servers/"+serviceName;
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("127.0.0.1:2181")
                .sessionTimeoutMs(5000)
                .retryPolicy(new ExponentialBackoffRetry(1000, 3))
                .build();
        client.start();

        PathChildrenCache childCache = new PathChildrenCache(client, servicePath, true);
        PathChildrenCacheListener childListener = new PathChildrenCacheListener() {
            public void childEvent(CuratorFramework client,
                                   PathChildrenCacheEvent event) throws Exception {
                System.out.println("work server list changed :" + event.getType());
                switch(event.getType()){
                case CHILD_ADDED:
                    serverList = client.getChildren().forPath(servicePath);
                    System.out.println("work server list changed add, new list is " + serverList);
                    break;
                case CHILD_REMOVED:
                    serverList = client.getChildren().forPath(servicePath);
                    System.out.println("work server list changed remove, new list is " + serverList);
                    break;
                default:
                    break;
                }
                consume();
            }
        };
        // 先挂监听器、再启动缓存,保证不丢启动期事件
        childCache.getListenable().addListener(childListener);
        try {
            childCache.start(StartMode.POST_INITIALIZED_EVENT);
        } catch (Exception e) {
            e.printStackTrace();
        }

        // 初始拉取:路径存在时才读取子节点(原实现的判断分支是反的,
        // 路径不存在时打印空列表、存在时反而什么也不提示)
        try {
            if(null != client.checkExists().forPath(servicePath)){
                serverList = client.getChildren().forPath(servicePath);
            }
            System.out.println("当前服务的地址"+serverList);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * 通过负载均衡算法(随机)得到一台服务器进行调用。
     * 修正点:原实现硬编码 getRandomNum(0,1)——只有一台服务器时会数组越界,
     * 超过两台时后面的服务器永远不会被选中;应以当前列表长度为上界。
     */
    public void consume(){
        if(serverList.isEmpty()){
            System.out.println("没有可用的服务器:" + serviceName);
            return;
        }
        int index = getRandomNum(0, serverList.size()-1);
        System.out.println("调用" + serverList.get(index)+"提供的服务:" + serviceName);
    }

    /**
     * 返回 [min, max] 闭区间内的随机整数。
     */
    public int getRandomNum(int min,int max){
        Random rdm = new Random();
        return rdm.nextInt(max-min+1)+min;
    }

    public static void main(String[] args)throws Exception {
        ServiceConsumer consumer = new ServiceConsumer();
        consumer.init();
        // 原实现 sleep(60*60) 单位是毫秒,只有 3.6 秒,进程几乎立刻退出;改为 1 小时
        Thread.sleep(1000*60*60);
    }
}
常用的负载均衡策略
- 轮询:请求顺序循环发送每个服务器,若其中一台服务器故障,就会跳过,且不参加下次轮询,直至恢复正常。
- 比率:每个服务器分配一定权重,根据这个权重将请求进行分配,若其中一台服务器故障,就会跳过,且不参加下次分配,直至恢复正常。
- 优先权:给所有服务器分组,给每个组定义优先权,将用户的请求分配给优先级最高的服务器组(在同一组内,采用预先设定的轮询或比率算法,分配用户的请求);当最高优先级中所有服务器或者指定数量的服务器出现故障,把请求送给次优先级的服务器组。这种方式,实际为用户提供一种热备份的方式。
- 最少连接数:记录当前每台服务器或者服务端口上的连接数,新的连接将传递给连接数最少的服务器。当其中某个服务器发生故障,就把其从服务器队列中拿出,不参加下一次的用户请求的分配,直到其恢复正常。
- 最快响应时间:新的连接传递给那些响应最快的服务器。当其中某个服务器发生故障,就把其从服务器队列中拿出,不参加下一次的用户请求的分配,直到其恢复正常。
- 哈希算法:将客户端的源地址,端口进行哈希运算,根据运算的结果转发给一台服务器进行处理,当其中某个服务器发生故障,就把其从服务器队列中拿出,不参加下一次的用户请求的分配,直到其恢复正常。
参考:https://segmentfault.com/a/1190000012185401