Zookeeper Assignment

Part 1: Implement a simple configuration center based on Zookeeper. The following features are required:

1. Create a web project whose database connection settings are managed by the Zookeeper configuration center, i.e. when the web project starts, it pulls the MySQL configuration parameters from Zookeeper (the znode layout used in this post is shown after this list).

2. The project must access MySQL through a database connection pool (any pool you are familiar with is fine).

3. When the configuration in Zookeeper changes, the web project must detect the change automatically, correctly release the previous connection pool, and create a new one.
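The database parameters are kept as child nodes under a single /CONFIG path. The node names below are the keys that the code later in this post reads; the example values are only illustrative:

/CONFIG
├── driverClassName   (e.g. com.mysql.cj.jdbc.Driver)
├── dbJDBCUrl         (e.g. jdbc:mysql://127.0.0.1:3306/test)
├── username          (e.g. root)
└── password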

Approach

1. On startup, create persistent nodes in Zookeeper holding the default configuration values and use that configuration to connect to the database (a bootstrap sketch follows this list).
2. Watch the configuration nodes with Zookeeper; when the data changes, release the old connection pool and reconnect with the new configuration.
3. Manually change the value of a persistent node and check the logs to verify that the connection has switched.
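Here is a minimal bootstrap sketch for step 1, assuming a local Zookeeper at 127.0.0.1:2181. The ConfigBootstrap class name and the default values are illustrative; the node paths match what DynamicConfigDatasource (shown below) reads.

package com.colin.config;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryNTimes;

/**
 * One-off bootstrap: creates the persistent /CONFIG nodes with default values
 * so that DynamicConfigDatasource has something to read on first start.
 */
public class ConfigBootstrap {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient("127.0.0.1:2181", new RetryNTimes(3, 1000));
        client.start();

        // Default values are illustrative; replace them with your own database settings
        createIfAbsent(client, "/CONFIG/driverClassName", "com.mysql.cj.jdbc.Driver");
        createIfAbsent(client, "/CONFIG/dbJDBCUrl", "jdbc:mysql://127.0.0.1:3306/test");
        createIfAbsent(client, "/CONFIG/username", "root");
        createIfAbsent(client, "/CONFIG/password", "root");

        client.close();
    }

    private static void createIfAbsent(CuratorFramework client, String path, String value) throws Exception {
        // Persistent node; creatingParentsIfNeeded also creates /CONFIG itself on the first run
        if (client.checkExists().forPath(path) == null) {
            client.create().creatingParentsIfNeeded().forPath(path, value.getBytes());
        }
    }
}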

package com.colin.config;

import com.mchange.v2.c3p0.ComboPooledDataSource;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.retry.RetryNTimes;

import javax.sql.DataSource;
import java.beans.PropertyVetoException;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent.Type.CHILD_UPDATED;

/**
 * Builds a data source from configuration stored in Zookeeper and reconfigures it dynamically on change.<br />
 * 1. Implements {@link DataSource} so it can be reused by other frameworks (e.g. managed as a Spring bean).<br />
 * 2. Uses the proxy pattern: the actual pooling is delegated to a third-party data source, {@link ComboPooledDataSource}; this class is only responsible for the dynamic switch.<br />
 * 3. When a configuration change is detected in Zookeeper, the data source reference is swapped to perform the switch, and the old data source is closed (so it does not hold unneeded connections).
 */
public class DynamicConfigDatasource implements DataSource {

    /**
     * Currently cached configuration values
     */
    private Map<String, String> configMap = new HashMap<>();

    /**
     * Configuration path to watch
     */
    private static final String CONFIG_PREFIX = "/CONFIG";

    /**
     * Client connected to Zookeeper
     */
    private final CuratorFramework zkClient;

    /**
     * The real connection pool being proxied
     */
    private volatile ComboPooledDataSource datasource;

    /**
     * Creates a new dynamic data source
     * @param zookeeperAddress Zookeeper connection address
     */
    public static DynamicConfigDatasource create(String zookeeperAddress) {
        return new DynamicConfigDatasource(CuratorFrameworkFactory.newClient(zookeeperAddress, new RetryNTimes(3, 1000)));
    }

    /**
     * Builds the data source from an existing Zookeeper client
     */
    private DynamicConfigDatasource(CuratorFramework zkClient) {
        this.zkClient = zkClient;
        if (!zkClient.getState().equals(CuratorFrameworkState.STARTED)) {
            zkClient.start();
        }

        try {
            // Load and cache the current data source configuration
            saveDatasourceConfig();

            // Initialize the real, underlying data source
            initDataSource();

            // Start watching for configuration changes
            startListener();
        } catch (Exception e) {
            throw new IllegalStateException("Failed to start the dynamic proxy data source!", e);
        }
    }

    @Override
    public Connection getConnection() throws SQLException {
        return datasource.getConnection();
    }

    @Override
    public Connection getConnection(String username, String password) throws SQLException {
        return datasource.getConnection(username, password);
    }

    /**
     * Fetches the configuration from Zookeeper and caches it in memory
     */
    private void saveDatasourceConfig() throws Exception {
        List<String> childrenNames = zkClient.getChildren().forPath(CONFIG_PREFIX);
        for (String childrenName : childrenNames) {
            String value = new String(zkClient.getData().forPath(CONFIG_PREFIX + "/" + childrenName));
            configMap.put(childrenName, value);
        }
    }

    /**
     * Starts watching the configuration nodes for changes
     * @throws Exception
     */
    private void startListener() throws Exception {

        PathChildrenCache watcher = new PathChildrenCache(zkClient, CONFIG_PREFIX, true);
        watcher.getListenable().addListener(new PathChildrenCacheListener() {

            public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent event) throws Exception {
                if (event.getType() != CHILD_UPDATED) {
                    return;
                }
                System.out.println("Detected a data source configuration change: " + new String(event.getData().getData()));

                // Reload the cached configuration
                saveDatasourceConfig();

                // Keep a reference to the data source currently in use
                final ComboPooledDataSource currentDatasource = DynamicConfigDatasource.this.datasource;

                // Rebuild the data source with the new configuration
                initDataSource();

                // Close the old data source so it releases its connections
                if (currentDatasource != null) {
                    currentDatasource.close();
                }
            }
        });
        watcher.start();
        System.out.println("Started watching the data source configuration");
    }

    /**
     * Initializes the data source from the cached configuration
     * @throws PropertyVetoException
     * @throws SQLException
     */
    private void initDataSource() throws PropertyVetoException, SQLException {
        ComboPooledDataSource dataSource = new ComboPooledDataSource();
        dataSource.setDriverClass(configMap.get("driverClassName"));
        dataSource.setJdbcUrl(configMap.get("dbJDBCUrl"));
        dataSource.setUser(configMap.get("username"));
        dataSource.setPassword(configMap.get("password"));

        // Switch the reference over to the newly built data source
        this.datasource = dataSource;
    }


    // The remaining DataSource methods are left as minimal stubs; the demo does not need them
    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        return null;
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        return false;
    }

    @Override
    public PrintWriter getLogWriter() throws SQLException {
        return null;
    }

    @Override
    public void setLogWriter(PrintWriter out) throws SQLException {
    }

    @Override
    public void setLoginTimeout(int seconds) throws SQLException {
    }

    @Override
    public int getLoginTimeout() throws SQLException {
        return 0;
    }

    @Override
    public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException {
        return null;
    }

}
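As point 1 of the class comment suggests, implementing DataSource makes it easy to hand the object to a framework. Below is a hedged sketch of Spring integration, assuming spring-context is on the classpath; the DataSourceConfiguration class name and the connect string are illustrative, and the console demo that follows does not require it.

package com.colin.config;

import javax.sql.DataSource;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Registers the dynamic data source as a regular Spring bean so that
 * JdbcTemplate, MyBatis, etc. can use it without knowing about Zookeeper.
 */
@Configuration
public class DataSourceConfiguration {

    @Bean
    public DataSource dataSource() {
        // The connect string is illustrative; point it at your Zookeeper ensemble
        return DynamicConfigDatasource.create("127.0.0.1:2181");
    }
}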
package com.colin;

import com.colin.config.DynamicConfigDatasource;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;

public class Task3Application {

    public static void main(String[] args) throws Exception {
        final DynamicConfigDatasource datasource = DynamicConfigDatasource.create("127.0.0.1");
        System.out.println("Data source initialized!");
        testConnection(datasource);

        // Block waiting for user input; change the configuration in Zookeeper during this time,
        // then press Enter to run the test query again
        while (true) {
            System.in.read();
            testConnection(datasource);
        }
    }

    private static void testConnection(DynamicConfigDatasource datasource) throws SQLException {
        try (
            Connection connection = datasource.getConnection();
            ResultSet resultSet = connection.prepareStatement("select user()").executeQuery();
        ) {
            while (resultSet.next()) {
                System.out.println("Currently logged-in MySQL user: " + resultSet.getString(1));
            }
        }
    }

}
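While Task3Application blocks on System.in.read(), the switch can be triggered by overwriting one of the /CONFIG nodes (step 3 of the approach). A minimal sketch, assuming the same local Zookeeper; the ConfigChanger class name and the new JDBC URL are illustrative.

package com.colin;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryNTimes;

/**
 * Simulates a configuration change: overwrites /CONFIG/dbJDBCUrl so that the
 * PathChildrenCache listener in DynamicConfigDatasource rebuilds the pool.
 */
public class ConfigChanger {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient("127.0.0.1:2181", new RetryNTimes(3, 1000));
        client.start();

        // Write the new value; this fires a CHILD_UPDATED event on the watcher
        client.setData().forPath("/CONFIG/dbJDBCUrl", "jdbc:mysql://127.0.0.1:3306/other_db".getBytes());

        client.close();
    }
}

After the listener logs the change, pressing Enter in Task3Application should run the test query through the newly built pool.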

HBase Assignment

Azkaban Assignment

