第四章 Sysrepo连接与会话

写在前面:最近忙于工作和打游戏,目标已达成,已无他望,哈哈。以后还是会尽量保证每周一更。

1 、何为连接与会话

      开发者要开始使用Sysrepo,首先必须创建一个连接。一个应用程序或者进程虽然可以创建多个连接,但是一般情况下只会创建一个连接;Sysrepo允许同时存在多个连接。简单举个例子:通常情况下,sysrepo-plugin在init_cb初始化时就会创建一个连接,这是由sysrepo-plugin与Sysrepo所建立的连接,只要不发生异常、不主动释放,该连接会存在于整个sysrepo-plugin进程的生命周期中。此外,例如用户通过sysrepoctl -l | grep ***查看某个YANG模型是否已经加载,sysrepoctl应用程序也会创建一个短连接,该连接在命令执行结束后立即释放;假如极端一点不释放该连接,再使用sysrepocfg来配置running库,这时就有3个与Sysrepo的连接。并且这3个连接互不干扰,也不影响Sysrepo的正常工作。

     而会话,是建立在连接之下的。一个连接下可以创建多个会话,每个会话都有一个唯一的标识;每个会话总是关联一个可随时更改的数据库,使用此会话的所有API调用都将在该数据库下操作。

连接与会话的关系如下所示,可能不是特别准确,但大概就是这个意思。

图1 连接与会话关系

 

2、核心数据结构

     Connection的数据结构主要是存储Sysrepo连接与Libyang的上下文,该连接所创建的共享内存结构。数据结构定义如下

/**
 * @brief Sysrepo connection.
 */
/**
 * @brief Sysrepo connection.
 */
struct sr_conn_ctx_s {
    struct ly_ctx *ly_ctx;          /**< Libyang context, also available to user. */
    sr_conn_options_t opts;         /**< Connection options. */
    sr_diff_check_cb diff_check_cb; /**< Connection user diff check callback. */

    pthread_mutex_t ptr_lock;       /**< Session-shared lock for accessing pointers to sessions. */
    sr_session_ctx_t **sessions;    /**< Array of sessions for this connection. */
    uint32_t session_count;         /**< Session count. */

    int main_create_lock;           /**< Process-shared file lock for creating main/ext SHM. */
    sr_rwlock_t ext_remap_lock;     /**< Session-shared lock for remapping ext SHM. */
    sr_shm_t main_shm;              /**< Main SHM structure. */
    sr_shm_t ext_shm;               /**< External SHM structure (all stored offsets point here). */

    struct sr_mod_cache_s {
        sr_rwlock_t lock;           /**< Session-shared lock for accessing the module cache. */
        struct lyd_node *data;      /**< Data of all cached modules. */

        struct {
            const struct lys_module *ly_mod;    /**< Libyang module in the cache. */
            uint32_t ver;           /**< Version of the module data in the cache, 0 is not valid. */
        } *mods;                    /**< Array of cached modules. */
        uint32_t mod_count;         /**< Cached modules count. */
    } mod_cache;                    /**< Module running data cache. */
};

Cache需要特别说明:如果一个会话工作在Running的数据库下操作,并且该会话的连接使能Cache功能,则不会每次都从Sysrepo中加载数据,可以从Cache中复制数据,这样,可以大幅度提高Sysrepo的处理性能。

    Session的主要数据结构


/**
 * @brief Sysrepo session.
 */
/**
 * @brief Sysrepo session.
 */
struct sr_session_ctx_s {
    sr_conn_ctx_t *conn;            /**< Connection used for creating this session. */
    sr_datastore_t ds;              /**< Datastore of the session. */
    sr_sub_event_t ev;              /**< Event of a callback session. ::SR_EV_NONE for standard user sessions. */
    sr_sid_t sid;                   /**< Session information. */
    sr_error_info_t *err_info;      /**< Session error information. */

    pthread_mutex_t ptr_lock;       /**< Lock for accessing pointers to subscriptions. */
    sr_subscription_ctx_t **subscriptions;  /**< Array of subscriptions of this session. */
    uint32_t subscription_count;    /**< Subscription count. */

    struct {
        struct lyd_node *edit;      /**< Prepared edit data tree. */
        struct lyd_node *diff;      /**< Diff data tree, used for module change iterator. */
    } dt[SR_DS_COUNT];              /**< Session-exclusive prepared changes. */

    struct sr_sess_notif_buf {
        ATOMIC_T thread_running;    /**< Flag whether the notification buffering thread of this session is running. */
        pthread_t tid;              /**< Thread ID of the thread. */
        sr_rwlock_t lock;           /**< Lock for accessing thread_running and the notification buffer
                                         (READ-lock is not used). */
        struct sr_sess_notif_buf_node {
            char *notif_lyb;        /**< Buffered notification to be stored in LYB format. */
            time_t notif_ts;        /**< Buffered notification timestamp. */
            const struct lys_module *notif_mod; /**< Buffered notification modules. */
            struct sr_sess_notif_buf_node *next;    /**< Next stored notification buffer node. */
        } *first;                   /**< First stored notification buffer node. */
        struct sr_sess_notif_buf_node *last;    /**< Last stored notification buffer node. */
    } notif_buf;                    /**< Notification buffering attributes. */
};

    Session结构中主要保存了该session所使用的连接、该session要操作的数据库类型(4种:running、startup、candidate、operational),以及重中之重的sr_subscription_ctx_t **subscriptions——sysrepo所支持的各种订阅都在该结构中定义。不多说,直接看数据结构定义:

/**
 * @brief Sysrepo subscription.
 */
/**
 * @brief Sysrepo subscription.
 */
struct sr_subscription_ctx_s {
    sr_conn_ctx_t *conn;            /**< Connection of the subscription. */
    uint32_t evpipe_num;            /**< Event pipe number of this subscription structure. */
    int evpipe;                     /**< Event pipe opened for reading. */
    ATOMIC_T thread_running;        /**< Flag whether the thread handling this subscription is running. */
    pthread_t tid;                  /**< Thread ID of the handler thread. */
    pthread_mutex_t subs_lock;      /**< Session-shared lock for accessing specific subscriptions. */

    struct modsub_change_s {
        char *module_name;          /**< Module of the subscriptions. */
        sr_datastore_t ds;          /**< Datastore of the subscriptions. */
        struct modsub_changesub_s {
            char *xpath;            /**< Subscription XPath. */
            uint32_t priority;      /**< Subscription priority. */
            sr_subscr_options_t opts;   /**< Subscription options. */
            sr_module_change_cb cb; /**< Subscription callback. */
            void *private_data;     /**< Subscription callback private data. */
            sr_session_ctx_t *sess; /**< Subscription session. */

            uint32_t request_id;    /**< Request ID of the last processed request. */
            sr_sub_event_t event;   /**< Type of the last processed event. */
        } *subs;                    /**< Configuration change subscriptions for each XPath. */
        uint32_t sub_count;         /**< Configuration change module XPath subscription count. */

        sr_shm_t sub_shm;           /**< Subscription SHM. */
    } *change_subs;                 /**< Change subscriptions for each module. */
    uint32_t change_sub_count;      /**< Change module subscription count. */

    struct modsub_oper_s {
        char *module_name;          /**< Module of the subscriptions. */
        struct modsub_opersub_s {
            char *xpath;            /**< Subscription XPath. */
            sr_oper_get_items_cb cb;    /**< Subscription callback. */
            void *private_data;     /**< Subscription callback private data. */
            sr_session_ctx_t *sess; /**< Subscription session. */

            uint32_t request_id;    /**< Request ID of the last processed request. */
            sr_shm_t sub_shm;       /**< Subscription SHM. */
        } *subs;                    /**< Operational subscriptions for each XPath. */
        uint32_t sub_count;         /**< Operational module XPath subscription count. */
    } *oper_subs;                   /**< Operational subscriptions for each module. */
    uint32_t oper_sub_count;        /**< Operational module subscription count. */

    struct modsub_notif_s {
        char *module_name;          /**< Module of the subscriptions. */
        struct modsub_notifsub_s {
            char *xpath;            /**< Subscription XPath. */
            time_t start_time;      /**< Subscription start time. */
            int replayed;           /**< Flag whether the subscription replay is finished. */
            time_t stop_time;       /**< Subscription stop time. */
            sr_event_notif_cb cb;   /**< Subscription value callback. */
            sr_event_notif_tree_cb tree_cb; /**< Subscription tree callback. */
            void *private_data;     /**< Subscription callback private data. */
            sr_session_ctx_t *sess; /**< Subscription session. */
        } *subs;                    /**< Notification subscriptions for each XPath. */
        uint32_t sub_count;         /**< Notification module XPath subscription count. */

        uint32_t request_id;    /**< Request ID of the last processed request. */
        sr_shm_t sub_shm;           /**< Subscription SHM. */
    } *notif_subs;                  /**< Notification subscriptions for each module. */
    uint32_t notif_sub_count;       /**< Notification module subscription count. */

    struct opsub_rpc_s {
        char *op_path;              /**< Subscription RPC/action path. */
        struct opsub_rpcsub_s {
            char *xpath;            /**< Subscription XPath. */
            uint32_t priority;      /**< Subscription priority. */
            sr_rpc_cb cb;           /**< Subscription value callback. */
            sr_rpc_tree_cb tree_cb; /**< Subscription tree callback. */
            void *private_data;     /**< Subscription callback private data. */
            sr_session_ctx_t *sess; /**< Subscription session. */

            uint32_t request_id;    /**< Request ID of the last processed request. */
            sr_sub_event_t event;   /**< Type of the last processed event. */
        } *subs;                    /**< RPC/action subscription for each XPath. */
        uint32_t sub_count;         /**< RPC/action XPath subscription count. */

        sr_shm_t sub_shm;           /**< Subscription SHM. */
    } *rpc_subs;                    /**< RPC/action subscriptions for each operation. */
    uint32_t rpc_sub_count;         /**< RPC/action operation subscription count. */
};

3、Connection函数

/*
 * Purpose: connect to the sysrepo datastore.
 * Input:  opts   - default connection handling options.
 * Output: conn_p - connection context used by all subsequent operations on
 *                  this connection; released by sr_disconnect().
 */
API int
sr_connect(const sr_conn_options_t opts, sr_conn_ctx_t **conn_p)
{
    sr_error_info_t *err_info = NULL;
    sr_conn_ctx_t *conn = NULL;
    struct lyd_node *sr_mods = NULL;
    int created = 0, changed;
    sr_main_shm_t *main_shm;
    uint32_t conn_count;

    SR_CHECK_ARG_APIRET(!conn_p, NULL, err_info);

    /* check that all required directories exist */
    /* The directories include the startup datastore path, the notification
     * path and the path of the YANG modules loaded by sysrepo; access
     * permissions for these paths are obtained as well. All path-related
     * defaults are defined in CMakeLists.txt and may be overridden by the
     * user. */
    if ((err_info = sr_shmmain_check_dirs())) {
        goto cleanup;
    }

    /* create basic connection structure */
    /* Allocates the connection, creates the libyang context, initializes the
     * mutexes and rwlocks, and opens the SHM-creation file lock. */
    if ((err_info = sr_conn_new(opts, &conn))) {
        goto cleanup;
    }
    
    /* CREATE LOCK */
    /* take the lock guarding main/ext SHM creation */
    if ((err_info = sr_shmmain_createlock(conn->main_create_lock))) {
        goto cleanup;
    }

    /* open the main SHM */
    /* Opens the main SHM (creating it if necessary), maps it with a suitable
     * size and performs the corresponding initialization. */
    if ((err_info = sr_shmmain_main_open(&conn->main_shm, &created))) {
        goto cleanup_unlock;
    }

    /* open the ext SHM */
    /* Opens the extension SHM (creating it if necessary), maps it with a
     * suitable size and performs the corresponding initialization. */
    if ((err_info = sr_shmmain_ext_open(&conn->ext_shm, created))) {
        goto cleanup_unlock;
    }
    
    /* Sysrepo uses a main + extension SHM scheme; the mechanism itself is
     * covered in detail later, here only the connection creation matters. */
    /* Everything above this point is basic setup: permission checks, memory
     * allocation and initialization. From here on, the already-installed YANG
     * modules must be parsed and stored in the corresponding structures. */

    /* update connection context based on stored lydmods data */
    /* Loads the stored YANG modules, applies any scheduled changes and updates
     * the connection context. Based on the previously created libyang context
     * ly_ctx: if the lydmods data tree (struct lyd_node) does not exist yet, a
     * new one is created for sysrepo; if it exists, the sysrepo module data is
     * parsed and the context is updated per module. For the very first
     * connection the lydmods data never exists, so after creating and loading
     * it, every YANG module is parsed into the data tree - one node per
     * module - and linked into the module list. */
    if ((err_info = sr_conn_lydmods_ctx_update(conn, created || !(opts & 
           SR_CONN_NO_SCHED_CHANGES), &sr_mods, &changed))) {
        goto cleanup_unlock;
    }
    
    /* this section is self-explanatory, see the individual comments below */
    if (changed || created) {
        /* clear all main SHM modules (if main SHM was just created, there aren't any anyway) */
        if ((err_info = sr_shm_remap(&conn->main_shm, sizeof(sr_main_shm_t)))) {
            goto cleanup_unlock;
        }
        main_shm = (sr_main_shm_t *)conn->main_shm.addr;
        main_shm->mod_count = 0;

        /* clear ext SHM (there can be no connections and no modules) */
        if ((err_info = sr_shm_remap(&conn->ext_shm, sizeof(size_t)))) {
            goto cleanup_unlock;
        }
        /* set wasted mem to 0 */
        *((size_t *)conn->ext_shm.addr) = 0;

        /* add all the modules in lydmods data into main SHM */
        if ((err_info = sr_shmmain_add(conn, sr_mods->child))) {
            goto cleanup_unlock;
        }
        
        /* copy full datastore from <startup> to <running> */
        /* During initialization the startup datastore files are copied into
         * the running datastore; the usual configuration restore happens
         * here. */
        if ((err_info = sr_shmmain_files_startup2running(conn, created))) {
            goto cleanup_unlock;
        }

        /* check data file existence and owner/permissions of all installed modules */
        if ((err_info = sr_shmmain_check_data_files(conn))) {
            goto cleanup_unlock;
        }
    }

    /* remember connection count */
    main_shm = (sr_main_shm_t *)conn->main_shm.addr;
    conn_count = main_shm->conn_state.conn_count;

    /* CREATE UNLOCK */
    sr_shmmain_createunlock(conn->main_create_lock);

    /* SHM LOCK (mainly to recover connections) */
    if ((err_info = sr_shmmain_lock_remap(conn, SR_LOCK_NONE, 1, 0, __func__))) {
        goto cleanup;
    }

    if (conn_count && !(opts & SR_CONN_NO_SCHED_CHANGES) && !main_shm->conn_state.conn_count) {

        /* SHM UNLOCK */
        sr_shmmain_unlock(conn, SR_LOCK_NONE, 1, 0, __func__);

        /* all the connections were stale so we actually can apply scheduled changes, recreate the whole connection */
        assert(!err_info);

        lyd_free_withsiblings(sr_mods);
        sr_conn_free(conn);
        return sr_connect(opts, conn_p);
    }

    /* add connection into state */
    err_info = sr_shmmain_conn_state_add(conn);

    /* SHM UNLOCK */
    /* release the lock */
    sr_shmmain_unlock(conn, SR_LOCK_NONE, 1, 0, __func__);

    goto cleanup;

cleanup_unlock:
    /* CREATE UNLOCK */
    sr_shmmain_createunlock(conn->main_create_lock);

cleanup:
    lyd_free_withsiblings(sr_mods);
    if (err_info) {
        sr_conn_free(conn);
        if (created) {
            /* remove any created SHM so it is not considered properly created */
            shm_unlink(SR_MAIN_SHM);
            shm_unlink(SR_EXT_SHM);
        }
    } else {
        *conn_p = conn;
    }
    return sr_api_ret(NULL, err_info);
}

 

/*
 * Purpose: clean up and free the connection context allocated by
 * sr_connect(). All sessions and subscriptions started on this connection are
 * automatically stopped and freed as well.
 * Input: conn - connection context acquired from sr_connect().
 */
/* the rest needs no extra explanation, the inline comments are clear */
API int
sr_disconnect(sr_conn_ctx_t *conn)
{
    sr_error_info_t *err_info = NULL, *lock_err = NULL, *tmp_err;
    uint32_t i;
    int wr_lock = 0;
    sr_main_shm_t *main_shm;

    if (!conn) {
        return sr_api_ret(NULL, NULL);
    }

    /* stop all subscriptions */
    /* the SHM WRITE lock is taken lazily, only once there actually is a
     * subscription to remove */
    for (i = 0; i < conn->session_count; ++i) {
        while (conn->sessions[i]->subscription_count && conn->sessions[i]->subscriptions[0]) {
            if (!wr_lock) {
                /* SHM LOCK */
                lock_err = sr_shmmain_lock_remap(conn, SR_LOCK_WRITE, 1, 0, __func__);
                sr_errinfo_merge(&err_info, lock_err);

                wr_lock = 1;
            }

            tmp_err = _sr_unsubscribe(conn->sessions[i]->subscriptions[0]);
            sr_errinfo_merge(&err_info, tmp_err);
        }
    }

    /* we need just remap lock or even no lock (no other threads can use the mapping)
     * would be fine, but be robust */
    if (!wr_lock) {
        /* SHM LOCK */
        lock_err = sr_shmmain_lock_remap(conn, SR_LOCK_NONE, 1, 0, __func__);
        sr_errinfo_merge(&err_info, lock_err);
    }

    /* stop all the sessions */
    while (conn->session_count) {
        tmp_err = _sr_session_stop(conn->sessions[0]);
        sr_errinfo_merge(&err_info, tmp_err);
    }

    /* free any stored operational data */
    tmp_err = sr_shmmod_oper_stored_del_conn(conn, conn, getpid());
    sr_errinfo_merge(&err_info, tmp_err);

    main_shm = (sr_main_shm_t *)conn->main_shm.addr;

    /* CONN STATE LOCK */
    tmp_err = sr_mlock(&main_shm->conn_state.lock, SR_CONN_STATE_LOCK_TIMEOUT, __func__);
    sr_errinfo_merge(&err_info, tmp_err);

    /* remove from state */
    sr_shmmain_conn_state_del(main_shm, conn->ext_shm.addr, conn, getpid());

    /* CONN STATE UNLOCK */
    sr_munlock(&main_shm->conn_state.lock);

    if (!lock_err) {
        /* SHM UNLOCK */
        /* release with the same mode the lock was acquired with */
        if (wr_lock) {
            sr_shmmain_unlock(conn, SR_LOCK_WRITE, 1, 0, __func__);
        } else {
            sr_shmmain_unlock(conn, SR_LOCK_NONE, 1, 0, __func__);
        }
    }

    /* free attributes */
    sr_conn_free(conn);

    return sr_api_ret(NULL, err_info);
}

4 、Session

/*
 * Purpose: start a new session.
 * Input:  conn      - connection acquired from sr_connect().
 *         datastore - datastore the session will operate on.
 * Output: session   - session context used for subsequent API calls;
 *                     released by sr_session_stop().
 *
 * Fix: the session is published into conn->sessions only AFTER it is fully
 * initialized. In the original order, a failure of sr_mutex_init() or
 * sr_rwlock_init() after sr_ptr_add() freed the session while conn->sessions
 * still held a pointer to it, leaving a dangling pointer for sr_disconnect().
 */
API int
sr_session_start(sr_conn_ctx_t *conn, const sr_datastore_t datastore, sr_session_ctx_t **session)
{
    sr_error_info_t *err_info = NULL;
    sr_main_shm_t *main_shm;
    uid_t uid;

    SR_CHECK_ARG_APIRET(!conn || !session, NULL, err_info);

    /* allocate one zero-initialized session structure */
    *session = calloc(1, sizeof **session);
    if (!*session) {
        SR_ERRINFO_MEM(&err_info);
        return sr_api_ret(NULL, err_info);
    }

    /* use new SR session ID and increment it (no lock needed, we are just reading and main SHM is never remapped) */
    /* C11-style atomics; the required compiler options are set in CMakeLists.txt */
    main_shm = (sr_main_shm_t *)conn->main_shm.addr;
    (*session)->sid.sr = ATOMIC_INC_RELAXED(main_shm->new_sr_sid);
    if ((*session)->sid.sr == (uint32_t)(ATOMIC_T_MAX - 1)) {
        /* the value in the main SHM is actually ATOMIC_T_MAX and calling another INC would cause an overflow */
        ATOMIC_STORE_RELAXED(main_shm->new_sr_sid, 1);
    }

    /* remember current real process owner */
    uid = getuid();
    if ((err_info = sr_get_pwd(&uid, &(*session)->sid.user))) {
        goto error;
    }

    /* finish initializing the session before making it visible to others */
    (*session)->conn = conn;
    (*session)->ds = datastore;
    if ((err_info = sr_mutex_init(&(*session)->ptr_lock, 0))) {
        goto error;
    }
    if ((err_info = sr_rwlock_init(&(*session)->notif_buf.lock, 0))) {
        goto error;
    }

    /* add the session into conn (last, so the error path never leaves a
     * dangling pointer in conn->sessions) */
    if ((err_info = sr_ptr_add(&conn->ptr_lock, (void ***)&conn->sessions, &conn->session_count, *session))) {
        goto error;
    }

    SR_LOG_INF("Session %u (user \"%s\") created.", (*session)->sid.sr, (*session)->sid.user);

    return sr_api_ret(NULL, NULL);

error:
    free((*session)->sid.user);
    free(*session);
    *session = NULL;
    return sr_api_ret(NULL, err_info);
}

 

/* 功能:停止当前session并且释放与该session所维系的全部资源
 * 输入: sr_session_start中所创建的session上下文
 */
####函数清晰,简单,注释丰富,一看就懂,就不多废话.
API int
sr_session_stop(sr_session_ctx_t *session)
{
    sr_error_info_t *err_info = NULL, *lock_err = NULL, *tmp_err;
    sr_conn_ctx_t *conn;
    int wr_lock = 0;

    if (!session) {
        return sr_api_ret(NULL, NULL);
    }

    conn = session->conn;

    /* stop all subscriptions of this session */
    while (session->subscription_count) {
        if (!wr_lock) {
            /* SHM LOCK */
            lock_err = sr_shmmain_lock_remap(conn, SR_LOCK_WRITE, 1, 0, __func__);
            sr_errinfo_merge(&err_info, lock_err);

            wr_lock = 1;
        }

        tmp_err = sr_subs_session_del(session, session->subscriptions[0]);
        sr_errinfo_merge(&err_info, tmp_err);
    }

    /* SHM UNLOCK */
    if (wr_lock && !lock_err) {
        sr_shmmain_unlock(conn, SR_LOCK_WRITE, 1, 0, __func__);
    }

    /* no lock needed, we are just reading main SHM */
    tmp_err = _sr_session_stop(session);
    sr_errinfo_merge(&err_info, tmp_err);

    return sr_api_ret(NULL, err_info);
}

连接与会话的核心就是这4个API函数,其它与连接和会话有关的API都是对它们的补充。想要进一步了解的,请阅读源码。

接下来会分析sysrepo的共享内存机制.SHM机制是新Sysrepo的核心,需要好好说道说道.

  • 1
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值