nuttx enter_critical_section

When we call a function like nxsig_timeout, it first calls enter_critical_section and then up_unblock_task; up_unblock_task, however, triggers an ecall trap on the CPU. After that trap has been handled, leave_critical_section is called and nxsig_timeout returns, as the sketch below illustrates.
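
A minimal sketch of that pattern (the identifier tcb and the exact call sites are illustrative, not the real nxsig_timeout source):

irqstate_t flags;

flags = enter_critical_section();  /* disable IRQs and take the lock */
up_unblock_task(tcb);              /* may raise an ecall trap        */
leave_critical_section(flags);     /* restore the saved IRQ state    */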

When I first read enter_critical_section, I began to doubt what this function actually guarantees: it is documented to disable IRQs on all CPUs, yet up_unblock_task still manages to deliver an ecall trap to the CPU. Why can this happen?

1. First, check enter_critical_section itself, which begins by calling up_irq_save.

/****************************************************************************
 * Name: enter_critical_section
 *
 * Description:
 *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
 *   specific counter is incremented to indicate that the thread has IRQs
 *   disabled and to support nested calls to enter_critical_section().
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;
  irqstate_t ret;
  int cpu;

  /* Disable interrupts.
   *
   * NOTE 1: Ideally this should disable interrupts on all CPUs, but most
   * architectures only support disabling interrupts on the local CPU.
   * NOTE 2: Interrupts may already be disabled, but we call up_irq_save()
   * unconditionally because we need to return valid interrupt status in any
   * event.
   * NOTE 3: We disable local interrupts BEFORE taking the spinlock in order
   * to prevent possible waits on the spinlock from interrupt handling on
   * the local CPU.
   */

try_again:

  ret = up_irq_save();

  /* Verify that the system has sufficiently initialized so that the task
   * lists are valid.
   */

  if (g_nx_initstate >= OSINIT_TASKLISTS)
    {
      /* If called from an interrupt handler, then just take the spinlock.
       * If we are already in a critical section, this will lock the CPU
       * in the interrupt handler.  Sounds worse than it is.
       */

      if (up_interrupt_context())
        {
          /* We are in an interrupt handler.  How can this happen?
           *
           *   1. We were not in a critical section when the interrupt
           *      occurred.  In this case, the interrupt was entered with:
           *
           *      g_cpu_irqlock = SP_UNLOCKED.
           *      g_cpu_nestcount = 0
           *      All CPU bits in g_cpu_irqset should be zero
           *
           *   2. We were in a critical section and interrupts on this
           *      CPU were disabled -- this is an impossible case.
           *
           *   3. We were in critical section, but up_irq_save() only
           *      disabled local interrupts on a different CPU;
           *      Interrupts could still be enabled on this CPU.
           *
           *      g_cpu_irqlock = SP_LOCKED.
           *      g_cpu_nestcount = 0
           *      The bit in g_cpu_irqset for this CPU should be zero
           *
           *   4. An extension of 3 is that we may be re-entered numerous
           *      times from the same interrupt handler.  In that case:
           *
           *      g_cpu_irqlock = SP_LOCKED.
           *      g_cpu_nestcount > 0
           *      The bit in g_cpu_irqset for this CPU should be zero
           *
           * NOTE: However, the interrupt entry conditions can change due
           * to previous processing by the interrupt handler that may
           * instantiate a new thread that has irqcount > 0 and may then
           * set the bit in g_cpu_irqset and g_cpu_irqlock = SP_LOCKED
           */

          /* Handle nested calls to enter_critical_section() from the same
           * interrupt.
           */

          cpu = this_cpu();
          if (g_cpu_nestcount[cpu] > 0)
            {
              DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                          g_cpu_nestcount[cpu] < UINT8_MAX);
              g_cpu_nestcount[cpu]++;
            }

          /* This is the first call to enter_critical_section from the
           * interrupt handler.
           */

          else
            {
              /* Make sure that the g_cpu_irqlock was not already set
               * by previous logic on this CPU that was executed by the
               * interrupt handler.  We know that the bit in g_cpu_irqset
               * for this CPU was zero on entry into the interrupt handler,
               * so if it is non-zero now then we know that was the case.
               */

              if ((g_cpu_irqset & (1 << cpu)) == 0)
                {
                  /* Wait until we can get the spinlock (meaning that we are
                   * no longer blocked by the critical section).
                   */

                  if (!irq_waitlock(cpu))
                    {
                      /* We are in a deadlock condition due to a pending
                       * pause request interrupt.  Break the deadlock by
                       * handling the pause request now.
                       */

                      DEBUGVERIFY(up_cpu_paused(cpu));
                    }
                }

              /* In any event, the nesting count is now one */

              g_cpu_nestcount[cpu] = 1;

              /* Also set the CPU bit so that other CPUs will be aware that this
               * CPU holds the critical section.
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);
            }
        }
      else
        {
          /* Normal tasking environment. */
          /* Get the TCB of the currently executing task on this CPU
           * (avoid using this_task(), which can recurse).
           */

          cpu  = this_cpu();
          rtcb = current_task(cpu);
          DEBUGASSERT(rtcb != NULL);

          /* Do we already have interrupts disabled? */

          if (rtcb->irqcount > 0)
            {
              /* Yes... make sure that the spinlock is set and increment the
               * IRQ lock count.
               *
               * NOTE: If irqcount > 0 then (1) we are in a critical section, and
               * (2) this CPU should hold the lock.
               */

              DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                          (g_cpu_irqset & (1 << this_cpu())) != 0 &&
                          rtcb->irqcount < INT16_MAX);
              rtcb->irqcount++;
            }
          else
            {
              /* If we get here with irqcount == 0, then we know that the
               * current task running on this CPU is not in a critical
               * section.  However other tasks on other CPUs may be in a
               * critical section.  If so, we must wait until they release
               * the spinlock.
               */

              DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);

              if (!irq_waitlock(cpu))
                {
                  /* We are in a deadlock condition due to a pending pause
                   * request interrupt.  Re-enable interrupts on this CPU
                   * and try again.  Briefly re-enabling interrupts should
                   * be sufficient to permit processing the pending pause
                   * request.
                   *
                   * NOTE: This should never happen on architectures like
                   * the Cortex-A; the inter-CPU interrupt (SGI) is not
                   * maskable.
                   */

                  up_irq_restore(ret);
                  goto try_again;
                }

              /* Now set the lock count to 1.
               *
               * Interrupt disables must follow a stacked order.  We
               * cannot allow other context switches to re-order the
               * enabling/disabling of interrupts.
               *
               * The scheduler accomplishes this by treating the irqcount
               * like lockcount:  Both will disable pre-emption.
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);
              rtcb->irqcount = 1;

              /* Note that we have entered the critical section */

#ifdef CONFIG_SCHED_CRITMONITOR
              sched_critmon_csection(rtcb, true);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
              sched_note_csection(rtcb, true);
#endif
            }
        }
    }

  /* Return interrupt status */

  return ret;
}

#else

irqstate_t enter_critical_section(void)
{
  irqstate_t ret;

  /* Disable interrupts */

  ret = up_irq_save();

  /* Check if we were called from an interrupt handler and that the task
   * lists have been initialized.
   */

  if (!up_interrupt_context() && g_nx_initstate >= OSINIT_TASKLISTS)
    {
      FAR struct tcb_s *rtcb = this_task();
      DEBUGASSERT(rtcb != NULL);

      /* Have we just entered the critical section?  Or is this a nested
       * call to enter_critical_section?
       */

      DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < UINT16_MAX);
      if (++rtcb->irqcount == 1)
        {
          /* Note that we have entered the critical section */

#ifdef CONFIG_SCHED_CRITMONITOR
          sched_critmon_csection(rtcb, true);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
          sched_note_csection(rtcb, true);
#endif
        }
    }

  /* Return interrupt status */

  return ret;
}
#endif
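
Note from the code above that the critical section nests: rtcb->irqcount counts recursive entries, and only when it drops back to zero is the lock actually released by leave_critical_section. A minimal usage sketch (illustrative only):

irqstate_t outer = enter_critical_section(); /* irqcount 0 -> 1, lock taken    */
irqstate_t inner = enter_critical_section(); /* irqcount 1 -> 2, count only    */

leave_critical_section(inner);               /* irqcount 2 -> 1, still locked  */
leave_critical_section(outer);               /* irqcount 1 -> 0, lock released */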
2. enter_critical_section in turn calls up_irq_save. On this RISC-V port the default implementation is:

irqstate_t weak_function up_irq_save(void)
{
  irqstate_t flags;

  /* Atomically read mstatus into flags and clear bit 3, MIE (Machine
   * Interrupt Enable).
   */

  __asm__ volatile("csrrci %0, mstatus, %1" : "=r"(flags) : "i"(0x8));

  /* Keep only the previous MIE state for a later restore */

  return flags & 0x8;
}

[Figure: mstatus register layout; MIE is bit 3]
As the register layout shows, up_irq_save only clears the MIE bit in mstatus, which masks machine-mode interrupts on the local CPU. An ecall, however, is a synchronous exception rather than an interrupt: it is not subject to MIE and therefore still traps into the handler. That is why up_unblock_task's ecall is taken even though we are inside the critical section.
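
For reference, the effect of csrrci can be modeled in C roughly as below (read_mstatus and write_mstatus are hypothetical helpers standing in for CSR access, and the real instruction performs the read-modify-write atomically):

#include <stdint.h>

/* Hypothetical CSR accessors, used only to model the semantics */

uint64_t read_mstatus(void);
void     write_mstatus(uint64_t value);

/* Model of "csrrci flags, mstatus, 0x8": read the old mstatus and
 * clear bit 3, the MIE (Machine Interrupt Enable) bit.
 */

uint64_t model_csrrci_mstatus_mie(void)
{
  uint64_t old = read_mstatus();

  write_mstatus(old & ~(uint64_t)0x8); /* clear MIE */
  return old;                          /* up_irq_save keeps old & 0x8 */
}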
