zookeeper监听服务的实际应用
近期做了个通用搜索的功能,即通过在页面上指定要查询的数据表和要查询的字段,生成全量数据的Elasticsearch索引,启动logstash执行增量数据同步,并对外提供Dubbo API用以搜索。
以上是背景,说说此过程中要解决的一个问题:增量数据同步的logstash进程在两台机器上都运行着,而logstash依赖的sql文件是动态生成的,因此需要确保在索引编辑页面上进行修改后,两台机器上的sql是一致的。
logstash的配置
input {
  jdbc {
    # Poll for incremental changes every 5 seconds
    schedule => "*/5 * * * * *"
    jdbc_driver_library => "/logstash/lib/thirdlib/mysql-connector-java-5.1.40-bin.jar"
    jdbc_driver_class => "com.mysql.jdbc.Driver"
    # The '$var' placeholders are substituted when this config is generated
    jdbc_connection_string => "jdbc:mysql://'$dbHost':'$dbPort'/'$dbName'"
    jdbc_user => "'$dbUser'"
    jdbc_password => "'$dbPwd'"
    # Page large result sets so a single poll does not load everything at once
    jdbc_paging_enabled => "true"
    jdbc_page_size => "1000"
    # This sql file is generated dynamically (note: Logstash config comments use '#', not '//')
    statement_filepath => "'$DIR'/'$type'incr.sql"
    jdbc_default_timezone => "Asia/Shanghai"
    # Per-type bookkeeping file that stores sql_last_value between runs
    last_run_metadata_path => "/logstash/bin/.logstash_jdbc_last_run_'$type'"
  }
}
解决方案:这里用zookeeper实现数据变更事件的监听。两台机器均监听zookeeper上的同一个节点,当ES索引发生重建时,本机修改此节点的数据,其他机器监听到znode的变化后,也重新生成sql文件并重启logstash进程。
核心代码
- 当重建ES索引的http请求到达一台机器时,该机器通过修改znode数据来通知其他节点
//Notify the other nodes in the cluster so their generated sql files stay consistent with this machine's
notifyAllNode(aliasName, newIndexName);
//Launch the logstash incremental-sync process as a Linux daemon; execThenDestroy enforces a 60s timeout on the exec call
ExecHelper.execThenDestroy(syncIncrDataCmd, 60 * 1000L);
/**
 * Notifies the other nodes in the cluster that an index was rebuilt, by
 * writing the new index name (plus this machine's IP) into a znode named
 * after the index type. Peers watching "/" react to the resulting
 * CHILD_ADDED / CHILD_UPDATED event and regenerate their sql files.
 *
 * @param typeName     type of the index; becomes the znode path "/typeName"
 * @param newIndexName name of the index after the rebuild
 * @throws Exception on any ZooKeeper/Curator failure
 */
private void notifyAllNode(String typeName, String newIndexName) throws Exception {
    //Payload format: "<newIndexName>@-@<localIp>", eg: enterprise3532542@-@172.10.2.1
    //The IP lets each listener ignore events that originated on its own machine.
    String data = newIndexName + "@-@" + IpUtils.getIp();
    //Encode explicitly: getBytes() with no charset uses the platform default,
    //which may differ between the writing and the reading machine.
    byte[] payload = data.getBytes("UTF-8");
    String path = "/" + typeName;
    //NOTE(review): check-then-act is racy if two machines rebuild the same type
    //concurrently (create() could throw NodeExistsException); consider
    //create().orSetData() on Curator 4+ — TODO confirm which version is in use.
    if (zkClientManager.getClient().checkExists().forPath(path) == null) {
        //First rebuild for this type: create the znode (and parents) with the payload
        zkClientManager.getClient().create().creatingParentsIfNeeded().forPath(path, payload);
    } else {
        //Znode already exists: overwrite its data, which fires CHILD_UPDATED on watchers
        zkClientManager.getClient().setData().forPath(path, payload);
    }
}
- 所有机器监听znode
/**
 * Post-startup initialisation for the common-search feature.
 *
 * On root-context refresh it (1) restarts a logstash incremental-sync process
 * for every non-deleted common-search index found in the database, and
 * (2) registers a ZooKeeper watch so that when another machine rebuilds an
 * index (and rewrites its znode under "/"), this machine regenerates the
 * matching incremental sql file.
 */
@Component
@Slf4j
public class InitConfiguration implements ApplicationListener<ContextRefreshedEvent> {

    @Override
    public void onApplicationEvent(ContextRefreshedEvent event) {
        ApplicationContext applicationContext = event.getApplicationContext();
        //Act only on the top-level context; child contexts re-fire this event.
        if (applicationContext.getParent() != null) {
            return;
        }
        IdxCommonSearchManager idxManager = applicationContext.getBean(IdxCommonSearchManager.class);
        IdxCommonSearchQuery idxQuery = new IdxCommonSearchQuery();
        idxQuery.createCriteria().andIsDeletedEqualTo(IsDeletedEnum.NOT_DELETED.getCode());
        //Load the indexes that were created through the common-search feature
        List<IdxCommonSearchDO> idxCommonSearchDOs = idxManager.selectByQuery(idxQuery);
        if (CollectionUtils.isEmpty(idxCommonSearchDOs)) {
            //NOTE(review): returning here also skips znode-listener registration,
            //so indexes created later on a peer are not picked up until restart —
            //preserved from the original flow; confirm this is intended.
            return;
        }
        startLogstashProcesses(idxCommonSearchDOs);
        registerZnodeListener(applicationContext);
    }

    /**
     * Launches one logstash incremental-sync process per index, each on the
     * shared thread pool so startup is not blocked by the 60s exec timeout.
     */
    private void startLogstashProcesses(List<IdxCommonSearchDO> idxCommonSearchDOs) {
        ExecutorService executorService = ThreadPoolFactory.getThreadPool();
        for (IdxCommonSearchDO idxDO : idxCommonSearchDOs) {
            executorService.submit(() -> {
                String startLogstashCmd =
                        "nohup sh " + AbstractSqlGenerator.START_LOGSTASH_PATH + " " + idxDO.getIdxName();
                log.info("startLogstashCmd:{}", startLogstashCmd);
                try {
                    ExecHelper.execThenDestroy(startLogstashCmd, 60 * 1000L);
                } catch (IOException e) {
                    log.error("startLogstash error", e);
                } catch (InterruptedException e) {
                    //Restore the interrupt flag so the pool thread sees the interruption
                    Thread.currentThread().interrupt();
                    log.error("startLogstash error", e);
                }
            });
        }
    }

    /**
     * Watches every child of "/" on ZooKeeper. When a peer machine rebuilds an
     * index it writes "indexName@-@ip" into "/type"; on CHILD_ADDED or
     * CHILD_UPDATED this machine regenerates the incremental sql for that type
     * (skipping events that originated locally).
     */
    private void registerZnodeListener(ApplicationContext applicationContext) {
        CommonSearchAO commonSearchAO = applicationContext.getBean(CommonSearchAO.class);
        SearchManager searchManager = applicationContext.getBean(SearchManager.class);
        ZkClientManager zkClientManager = applicationContext.getBean(ZkClientManager.class);
        CuratorFramework client = zkClientManager.getClient();
        //cacheData=true so event.getData().getData() carries the znode payload
        PathChildrenCache cache = new PathChildrenCache(client, "/", true);
        try {
            PathChildrenCacheListener cacheListener = ((client1, event1) -> {
                //React to child-created and child-data-changed events only
                if (event1.getType() != PathChildrenCacheEvent.Type.CHILD_ADDED
                        && event1.getType() != PathChildrenCacheEvent.Type.CHILD_UPDATED) {
                    return;
                }
                //znode path, eg: /enterprise -> index type "enterprise"
                String path = event1.getData().getPath();
                String indexName = path.split("/")[1];
                //Decode with explicit UTF-8 to match the writer, not the platform default.
                //Payload format: "esIndexName@-@ip", eg: enterprise12434@-@172.10.1.2
                String data = new String(event1.getData().getData(), "UTF-8");
                log.info("nodeChanged,nodePath:{},nodeData:{}", path, data);
                String[] dataArr = data.split("@-@");
                if (dataArr.length < 2) {
                    //Guard against foreign/malformed znodes under "/" instead of
                    //letting an ArrayIndexOutOfBoundsException kill the callback
                    log.warn("unexpected znode payload, skip. path:{},data:{}", path, data);
                    return;
                }
                String realIndexName = dataArr[0];
                String ip = dataArr[1];
                //The event originated on this machine — nothing to sync
                if (ip.equals(IpUtils.getIp())) {
                    return;
                }
                //Regenerate the incremental sql file for the rebuilt index
                CommonSearchIdxDTO idxDTO = commonSearchAO.buildCommonSearchIdxDTO(indexName);
                searchManager.rebuildIncrSql(idxDTO, realIndexName);
            });
            //Register the listener BEFORE start(): PathChildrenCache begins
            //delivering initial CHILD_ADDED events as soon as it starts, and a
            //listener added afterwards can miss them.
            cache.getListenable().addListener(cacheListener);
            cache.start();
        } catch (Exception e) {
            ZkExceptionHandler.handleException(e);
        }
    }
}