DWZ(JUI)的lookupGroup增加回调函数

DWZ 是一个很好的富客户端框架

lookupGroup也是一个必会用到的功能,但没有回调函数,后期处理相当不便。

修改其dwz.database.js

增加几行,就能实现一个很好的回调。

使用就方便了,

如:

源代码

选择部门

// Example callback: after the lookup dialog writes the picked id into the
// hidden #roleid_id input, copy it into the visible #roleid field.
function roleid_callback() { jQuery("#roleid").val(jQuery("#roleid_id").val()); }

修改的代码如下

源代码

/**
 * DWZ lookupGroup with callback support.
 *
 * Patch to dwz.database.js: after a lookup dialog brings its values back
 * into the form, an optional callback (named via the trigger element's
 * `callback` attribute) is invoked so callers can post-process the result.
 *
 * Fixes over the pasted original:
 *  - `Var url = ...` (capital V) was a syntax error.
 *  - `eval_r(...)` (a paste-mangled `eval`) is replaced by a safe
 *    `window[name]` function lookup — no eval of attribute strings.
 *  - The `$.fn.extend({...})` call was never closed; the missing `});`
 *    is restored so the IIFE terminates correctly.
 *  - Callback errors are logged instead of being silently swallowed.
 */
(function($){

    // Shared state describing the most recent lookup trigger.
    var _lookup = {currentGroup: "", suffix: "", $target: null, pk: "id", callback: null};

    var _util = {
        // Build the fully qualified input name: "<group>.<key><suffix>".
        // The dot is omitted when no group is set.
        _lookupPrefix: function(key){
            var strDot = _lookup.currentGroup ? "." : "";
            return _lookup.currentGroup + strDot + key + _lookup.suffix;
        },
        lookupPk: function(key){
            return this._lookupPrefix(key);
        },
        lookupField: function(key){
            return this.lookupPk(key);
        }
    };

    $.extend({
        /**
         * Copy the values in `args` (keyed by field name) into the inputs of
         * the unitBox surrounding the lookup trigger, then fire the optional
         * user callback registered on the trigger element.
         */
        bringBackSuggest: function(args){
            var $box = _lookup.$target.parents(".unitBox:first");
            $box.find(":input").each(function(){
                var $input = $(this), inputName = $input.attr("name");
                for (var key in args) {
                    var name = (_lookup.pk == key) ? _util.lookupPk(key) : _util.lookupField(key);
                    if (name == inputName) {
                        $input.val(args[key]);
                        break;
                    }
                }
            });
            // The `callback` attribute holds a global function NAME; resolve
            // it on window instead of eval()-ing arbitrary attribute text.
            var callback = _lookup.callback;
            try {
                if (typeof callback === "function") {
                    callback();
                } else if (callback && typeof window[callback] === "function") {
                    window[callback]();
                }
            } catch (e) {
                // Best-effort: a broken callback must not abort bringBack,
                // but do surface the error for debugging.
                if (window.console && window.console.error) {
                    window.console.error("lookup callback failed:", e);
                }
            }
        },

        /** bringBackSuggest plus closing the current DWZ dialog. */
        bringBack: function(args){
            $.bringBackSuggest(args);
            $.pdialog.closeCurrent();
        }
    });

    $.fn.extend({
        /**
         * Turn the matched elements into lookup triggers: clicking opens the
         * element's href in a DWZ dialog and records the lookup context
         * (group, suffix, pk, callback) used by bringBack/bringBackSuggest.
         */
        lookup: function(){
            return this.each(function(){
                var $this = $(this), options = {
                    mask: true,
                    width: $this.attr('width') || 820,
                    height: $this.attr('height') || 400,
                    // Attributes are the strings "true"/"false"; compare
                    // explicitly rather than eval()-ing them.
                    maxable: ($this.attr("maxable") || "true") === "true",
                    resizable: ($this.attr("resizable") || "true") === "true"
                };
                $this.click(function(event){
                    _lookup = $.extend(_lookup, {
                        currentGroup: $this.attr("lookupGroup") || "",
                        callback: $this.attr("callback"),
                        suffix: $this.attr("suffix") || "",
                        $target: $this,
                        pk: $this.attr("lookupPk") || "id"
                    });
                    var url = ($this.attr("href")).replaceTmById($(event.target).parents(".unitBox:first"));
                    if (!url.isFinishedTm()) {
                        alertMsg.error($this.attr("warn") || DWZ.msg("alertSelectMsg"));
                        return false;
                    }
                    $.pdialog.open(url, "_blank", $this.attr("title") || $this.text(), options);
                    return false;
                });
            });
        }
    });

})(jQuery);

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是一个简单的 GRU 网络的 Python 代码示例,用于序列到序列的预测任务: ```python import numpy as np # 定义激活函数 def sigmoid(x): return 1 / (1 + np.exp(-x)) # 定义 GRU 网络类 class GRUNetwork: def __init__(self, input_size, hidden_size, output_size): self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size # 初始化权重 self.Wr = np.random.randn(hidden_size, input_size) self.Wz = np.random.randn(hidden_size, input_size) self.W = np.random.randn(hidden_size, input_size) self.Ur = np.random.randn(hidden_size, hidden_size) self.Uz = np.random.randn(hidden_size, hidden_size) self.U = np.random.randn(hidden_size, hidden_size) self.V = np.random.randn(output_size, hidden_size) # 前向传播 def forward(self, x): T = len(x) h = np.zeros((T+1, self.hidden_size)) r = np.zeros((T, self.hidden_size)) z = np.zeros((T, self.hidden_size)) h[0] = np.zeros(self.hidden_size) for t in range(T): r[t] = sigmoid(np.dot(self.Wr, x[t]) + np.dot(self.Ur, h[t])) z[t] = sigmoid(np.dot(self.Wz, x[t]) + np.dot(self.Uz, h[t])) h[t+1] = (1 - z[t]) * h[t] + z[t] * np.tanh(np.dot(self.W, x[t]) + np.dot(self.U, r[t] * h[t])) y = np.dot(self.V, h[1:].T) return y # 训练模型 def train(self, x_train, y_train, learning_rate=0.1, epochs=100): for epoch in range(epochs): for x, y_true in zip(x_train, y_train): # 前向传播 y_pred = self.forward(x) # 反向传播 dV = np.outer(y_pred - y_true, np.sum(h[1:], axis=0)) dh = np.zeros(self.hidden_size) dW = np.zeros((self.hidden_size, self.input_size)) dU = np.zeros((self.hidden_size, self.hidden_size)) dWr = np.zeros((self.hidden_size, self.input_size)) dUr = np.zeros((self.hidden_size, self.hidden_size)) dWz = np.zeros((self.hidden_size, self.input_size)) dUz = np.zeros((self.hidden_size, self.hidden_size)) for t in reversed(range(len(x))): dy = y_pred - y_true dh = np.dot(self.V.T, dy) + dh dz = dh * (np.tanh(np.dot(self.W, x[t]) + np.dot(self.U, r[t] * h[t])) - h[t]) dU = dz * r[t] * h[t] + dU dW = dz * x[t] + dW dr = dz * h[t] * (1 - r[t]) * r[t] dUr = dr * h[t-1] + dUr dWr = dr * x[t] + 
dWr dh = dz * r[t] + np.dot(self.U.T, dr) dz = dh * (1 - z[t]) * z[t] dUz = dz * h[t-1] + dUz dWz = dz * x[t] + dWz self.V = self.V - learning_rate * dV self.W = self.W - learning_rate * dW self.U = self.U - learning_rate * dU self.Wr = self.Wr - learning_rate * dWr self.Ur = self.Ur - learning_rate * dUr self.Wz = self.Wz - learning_rate * dWz self.Uz = self.Uz - learning_rate * dUz ``` 以上代码是一个基本的 GRU 网络实现。在训练模型时,我们可以使用反向传播来计算权重的梯度,并使用梯度下降来更新权重。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值