plda Source Code (12)

LightLDA

The original collapsed Gibbs sampling full conditional is:
$$p(z_{di}=k \mid \text{rest}) \propto \frac{(n_{kd}^{-di}+\alpha_k)(n_{kw}^{-di}+\beta_w)}{n_k^{-di}+\overline{\beta}}$$

AliasLDA
$$p(z_{di}=k \mid \text{rest}) \propto \frac{n_{kd}^{-di}(n_{kw}^{-di}+\beta_w)}{n_k^{-di}+\overline{\beta}} + \frac{\alpha_k(n_{kw}^{-di}+\beta_w)}{n_k^{-di}+\overline{\beta}}$$
The second term can be viewed as a "topic-word" bucket that does not depend on the document. It can be sampled in O(1) amortized time using an alias table combined with Metropolis-Hastings (a Monte Carlo sampling method). The alias table was introduced in the previous post.
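As a refresher, below is a minimal self-contained C++ sketch of an alias table (Vose's method). The class and member names are illustrative only, not plda's actual q structure. Construction is O(K); every draw afterwards is O(1):

    #include <iostream>
    #include <random>
    #include <vector>

    // Minimal alias table (Vose's method): O(K) construction, O(1) sampling.
    class AliasTable {
     public:
      explicit AliasTable(const std::vector<double>& weights)
          : prob_(weights.size()), alias_(weights.size()) {
        const size_t K = weights.size();
        double sum = 0.0;
        for (double w : weights) sum += w;
        // Scale weights so an average bucket holds probability exactly 1.
        std::vector<double> scaled(K);
        std::vector<size_t> small, large;
        for (size_t k = 0; k < K; ++k) {
          scaled[k] = weights[k] * K / sum;
          (scaled[k] < 1.0 ? small : large).push_back(k);
        }
        // Pair each under-full bucket with an over-full donor.
        while (!small.empty() && !large.empty()) {
          size_t s = small.back(); small.pop_back();
          size_t l = large.back(); large.pop_back();
          prob_[s] = scaled[s];
          alias_[s] = l;
          scaled[l] -= (1.0 - scaled[s]);
          (scaled[l] < 1.0 ? small : large).push_back(l);
        }
        // Leftovers (numerical residue) are exactly-full buckets.
        for (size_t k : large) prob_[k] = 1.0;
        for (size_t k : small) prob_[k] = 1.0;
      }

      // O(1) draw: pick a bucket uniformly, then keep it or take its alias.
      size_t Sample(std::mt19937& rng) const {
        std::uniform_int_distribution<size_t> bucket(0, prob_.size() - 1);
        std::uniform_real_distribution<double> coin(0.0, 1.0);
        size_t k = bucket(rng);
        return coin(rng) < prob_[k] ? k : alias_[k];
      }

     private:
      std::vector<double> prob_;   // probability of keeping the bucket itself
      std::vector<size_t> alias_;  // where the remaining bucket mass points
    };

    int main() {
      std::mt19937 rng(42);
      AliasTable table({0.1, 0.2, 0.3, 0.4});  // unnormalized weights are fine
      std::vector<int> counts(4, 0);
      for (int i = 0; i < 100000; ++i) counts[table.Sample(rng)]++;
      for (int k = 0; k < 4; ++k)
        std::cout << "topic " << k << ": " << counts[k] / 100000.0 << "\n";
    }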

LightLDA
$$p(z_{di}=k \mid \text{rest}) \propto (n_{kd}^{-di}+\alpha_k) \cdot \frac{n_{kw}^{-di}+\beta_w}{n_k^{-di}+\overline{\beta}}$$
$$q(z_{di}=k \mid \text{rest}) \propto (n_{kd}+\alpha_k) \cdot \frac{n_{kw}+\beta_w}{n_k+\overline{\beta}}$$
The first factor serves as the doc-proposal, the second as the word-proposal.
As with AliasLDA, a draw from either proposal is corrected by a Metropolis-Hastings step, with acceptance probability (here s is the current topic and t is the proposed one):
$$\min\left\{1, \frac{p(t)\,q(t \to s)}{p(s)\,q(s \to t)}\right\}$$

doc-proposal
$$q = p_d(k) \propto n_{kd}+\alpha_k$$
Acceptance ratio:
$$\pi_d = \frac{(n_{td}^{-di}+\alpha_t)(n_{tw}^{-di}+\beta_w)(n_s^{-di}+\overline{\beta})(n_{sd}+\alpha_s)}{(n_{sd}^{-di}+\alpha_s)(n_{sw}^{-di}+\beta_w)(n_t^{-di}+\overline{\beta})(n_{td}+\alpha_t)}$$
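This is the general MH ratio above with $q = p_d$ substituted: the full conditional $p$ contributes the three $-di$ factors on each side, while the proposal contributes $p_d(s)/p_d(t) = (n_{sd}+\alpha_s)/(n_{td}+\alpha_t)$, evaluated on the counts without the $-di$ exclusion because that is the distribution the proposal actually draws from:

$$\pi_d = \frac{p(t)\,p_d(s)}{p(s)\,p_d(t)}$$

The doc-proposal step in the plda code: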

    int K = model_->num_topics();
    // Kalpha = K * alpha_ (precomputed): the smoothing mass of the doc-proposal.
    double sumPd = document->GetDocumentLength() + Kalpha;
    for (...) {
        int w = iterator.Word();
        int topic = iterator.Topic();
        int new_topic;
        int old_topic = topic;

        {
            // 1. Draw a topic from the doc-proposal p_d(k) ∝ n_kd + alpha_k.
            double u = random->RandDouble() * sumPd;
            if (u < document->GetDocumentLength()) {
                // With probability len / (len + K*alpha): reuse the topic of a
                // uniformly chosen token, i.e. sample k with probability n_kd / len.
                unsigned pos = (unsigned) (u);
                new_topic = document->topics().wordtopics(pos);
            } else {
                // With probability K*alpha / (len + K*alpha): draw uniformly
                // from the K topics; u / alpha_ is uniform in [0, K).
                u -= document->GetDocumentLength();
                u = u / alpha_;
                new_topic = (unsigned short) (u);
            }

            if (topic != new_topic) {
                // 2. Compute the acceptance probability pi_d. The -1 adjustment
                // excludes the current token from the counts (the -di terms).
                int ajustment_old = topic == old_topic ? -1 : 0;
                int ajustment_new = new_topic == old_topic ? -1 : 0;
                double temp_old = ComputeProbForK(document, w, topic, ajustment_old);
                double temp_new = ComputeProbForK(document, w, new_topic, ajustment_new);
                double prop_old = (N_DK(document, topic) + alpha_);
                double prop_new = (N_DK(document, new_topic) + alpha_);
                double acceptance = (temp_new * prop_old) / (temp_old * prop_new);

                // 3. Accept or reject by comparing against uniform[0,1].
                if (random->RandDouble() < acceptance) {
                    topic = new_topic;
                }
            }

where ComputeProbForK is:

    // Full conditional p(z_di = k | rest), up to a constant. ajustment = -1
    // removes the current token from all three counts when k is its current topic.
    double ComputeProbForK(LDADocument* document, int w, int topic, int ajustment) {
        return (N_DK(document, topic) + alpha_ + ajustment)
                * (N_WK(w, topic) + beta_ + ajustment)
                / (N_K(topic) + Vbeta + ajustment);
    }
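The doc-proposal draw in step 1 is O(1) without any alias table because, for a symmetric $\alpha$, $p_d$ decomposes into a two-component mixture:

$$p_d(k) = \frac{n_{kd}+\alpha}{\mathrm{len}_d+K\alpha} = \frac{\mathrm{len}_d}{\mathrm{len}_d+K\alpha}\cdot\frac{n_{kd}}{\mathrm{len}_d} + \frac{K\alpha}{\mathrm{len}_d+K\alpha}\cdot\frac{1}{K}$$

Reusing the topic of a uniformly chosen token position is an exact draw from $n_{kd}/\mathrm{len}_d$, since topic $k$ is currently assigned to exactly $n_{kd}$ of the document's $\mathrm{len}_d$ tokens; the other component is a uniform draw over the $K$ topics. This is precisely what the u < document->GetDocumentLength() branch implements.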

word-proposal
$$q = p_w(k) \propto \frac{n_{kw}+\beta_w}{n_k+\overline{\beta}}$$
Acceptance ratio:
$$\pi_w = \frac{(n_{td}^{-di}+\alpha_t)(n_{tw}^{-di}+\beta_w)(n_s^{-di}+\overline{\beta})(n_{sw}+\beta_w)(n_t+\overline{\beta})}{(n_{sd}^{-di}+\alpha_s)(n_{sw}^{-di}+\beta_w)(n_t^{-di}+\overline{\beta})(n_{tw}+\beta_w)(n_s+\overline{\beta})}$$
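As with $\pi_d$, this is the MH ratio with $q = p_w$ substituted; the proposal now contributes two factors on each side, evaluated on the counts the proposal was built from (without the $-di$ exclusion):

$$\pi_w = \frac{p(t)\,p_w(s)}{p(s)\,p_w(t)}, \qquad \frac{p_w(s)}{p_w(t)} = \frac{(n_{sw}+\beta_w)(n_t+\overline{\beta})}{(n_{tw}+\beta_w)(n_s+\overline{\beta})}$$

The word-proposal step in the code: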

    {
        // 1. Draw a topic from the word-proposal via the alias table;
        // rebuild the table every qtable_construct_frequency draws so the
        // O(K) construction cost is amortized over many O(1) samples.
        q[w].noSamples++;
        if (q[w].noSamples > qtable_construct_frequency) {
            GenerateQTable(w);
        }
        new_topic = q[w].sample(random->RandInt(K), random->RandDouble());
        if (topic != new_topic) {
            // 2. Compute the acceptance probability pi_w, using the stored
            // (possibly stale) proposal weights q[w].w the table was built from.
            int ajustment_old = topic == old_topic ? -1 : 0;
            int ajustment_new = new_topic == old_topic ? -1 : 0;
            double temp_old = ComputeProbForK(document, w, topic, ajustment_old);
            double temp_new = ComputeProbForK(document, w, new_topic, ajustment_new);
            double acceptance = (temp_new * q[w].w[topic]) / (temp_old * q[w].w[new_topic]);

            // 3. Accept or reject by comparing against uniform[0,1].
            if (random->RandDouble() < acceptance) {
                topic = new_topic;
            }
        }
    }
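Note that this block shares topic, old_topic, and new_topic with the doc-proposal block above, so in the full sampler the two run back to back inside the per-token loop: each token gets one doc-proposal MH step followed by one word-proposal MH step. This alternation is the cycle proposal from the LightLDA paper; each proposal alone matches only one factor of the true conditional, but cycling them lets the combined chain mix well.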

where GenerateQTable is:

    void GenerateQTable(unsigned int w) {
        int num_topics = model_->num_topics();
        q[w].wsum = 0.0;
        const TopicDistribution<int32>& word_distribution = model_->GetWordTopicDistribution(w);
        const TopicDistribution<int32>& n_k = model_->GetGlobalTopicDistribution();
        for (int k = 0; k < num_topics; ++k) {
            // Proposal weight p_w(k) ∝ (n_kw + beta) / (n_k + V*beta).
            q[w].w[k] = (word_distribution[k] + beta_) / (n_k[k] + Vbeta);
            q[w].wsum += q[w].w[k];
        }
        q[w].constructTable();  // rebuild the alias table from the fresh weights
    }
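One subtlety: between rebuilds, the alias table and the stored weights q[w].w encode a stale snapshot of the counts. That is still a valid MH proposal as long as the acceptance ratio is evaluated with the same stale weights the draw actually came from, which is exactly why the word-proposal acceptance divides by q[w].w[new_topic] rather than recomputing $(n_{kw}+\beta_w)/(n_k+\overline{\beta})$ from the live counts:

$$\pi_w = \min\left\{1, \frac{p(t)\,\tilde{q}_w(s)}{p(s)\,\tilde{q}_w(t)}\right\}$$

where $\tilde{q}_w$ denotes the stale proposal the table was built from.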