__hrtimer_run_queues

int hrtimers_dead_cpu(unsigned int scpu)
{
    struct hrtimer_cpu_base *old_base, *new_base;
    int i;

    BUG_ON(cpu_online(scpu));
    tick_cancel_sched_timer(scpu);

    /*
     * This BH disable ensures that raise_softirq_irqoff() does
     * not wake up ksoftirqd (and acquire the pi-lock) while
     * holding the cpu_base lock
     */
    local_bh_disable();
    local_irq_disable();
    old_base = &per_cpu(hrtimer_bases, scpu);
    new_base = this_cpu_ptr(&hrtimer_bases);
    /*
     * The caller is globally serialized and nobody else
     * takes two locks at once, so deadlock is not possible.
     */
    raw_spin_lock(&new_base->lock);
    raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

    for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
        migrate_hrtimer_list(&old_base->clock_base[i],
                     &new_base->clock_base[i]);
    }

    /*
     * The migration might have changed the first expiring softirq
     * timer on this CPU. Update it.
     */
    hrtimer_update_softirq_timer(new_base, false);

    raw_spin_unlock(&old_base->lock);
    raw_spin_unlock(&new_base->lock);

    /* Check if we got expired work to do */
    __hrtimer_peek_ahead_timers();
    local_irq_enable();
    local_bh_enable();
    return 0;
}
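The heavy lifting in the loop above is done by migrate_hrtimer_list(). The following is a simplified sketch of that per-clock-base step, not the verbatim kernel function: the real code also runs the debug-object hooks and sanity checks, and the _sketch suffix is mine.

/*
 * Simplified sketch of the per-clock-base migration. Each timer is
 * detached from the dead CPU's queue but kept in ENQUEUED state so
 * hrtimer_active() never sees it as inactive, then rebound and requeued
 * on the surviving CPU. Reprogramming of the event device is left to
 * the __hrtimer_peek_ahead_timers() call in hrtimers_dead_cpu().
 */
static void migrate_hrtimer_list_sketch(struct hrtimer_clock_base *old_base,
                                        struct hrtimer_clock_base *new_base)
{
    struct timerqueue_node *node;

    while ((node = timerqueue_getnext(&old_base->active))) {
        struct hrtimer *timer = container_of(node, struct hrtimer, node);

        __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
        timer->base = new_base;
        enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
    }
}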

/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
    struct tick_device *td;

    if (!hrtimer_hres_active())
        return;

    td = this_cpu_ptr(&tick_cpu_device);
    if (td && td->evtdev)
        hrtimer_interrupt(td->evtdev);
}

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
    struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
    ktime_t expires_next, now, entry_time, delta;
    unsigned long flags;
    int retries = 0;

    BUG_ON(!cpu_base->hres_active);
    cpu_base->nr_events++;
    dev->next_event = KTIME_MAX;

    raw_spin_lock_irqsave(&cpu_base->lock, flags);
    entry_time = now = hrtimer_update_base(cpu_base);
retry:
    cpu_base->in_hrtirq = 1;
    /*
     * We set expires_next to KTIME_MAX here with cpu_base->lock
     * held to prevent a timer from being enqueued in our queue via
     * the migration code. This does not affect enqueueing of
     * timers which run their callback and need to be requeued on
     * this CPU.
     */
    cpu_base->expires_next = KTIME_MAX;

    if (!ktime_before(now, cpu_base->softirq_expires_next)) {
        cpu_base->softirq_expires_next = KTIME_MAX;
        cpu_base->softirq_activated = 1;
        raise_softirq_irqoff(HRTIMER_SOFTIRQ);
    }

    __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

    /* Reevaluate the clock bases for the [soft] next expiry */
    expires_next = hrtimer_update_next_event(cpu_base);
    /*
     * Store the new expiry value so the migration code can verify
     * against it.
     */
    cpu_base->expires_next = expires_next;
    cpu_base->in_hrtirq = 0;
    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

    /* Reprogramming necessary ? */
    if (!tick_program_event(expires_next, 0)) {
        cpu_base->hang_detected = 0;
        return;
    }

    /*
     * The next timer had already expired due to:
     * - tracing
     * - long-lasting callbacks
     * - being scheduled away when running in a VM
     *
     * We need to prevent looping forever in the hrtimer
     * interrupt routine. We give it 3 attempts to avoid
     * overreacting on some spurious event.
     *
     * Acquire the base lock for updating the offsets and retrieving
     * the current time.
     */
    raw_spin_lock_irqsave(&cpu_base->lock, flags);
    now = hrtimer_update_base(cpu_base);
    cpu_base->nr_retries++;
    if (++retries < 3)
        goto retry;
    /*
     * Give the system a chance to do something other than looping
     * here. We stored the entry time, so we know exactly how long
     * we spent here. We schedule the next event this amount of
     * time away.
     */
    cpu_base->nr_hangs++;
    cpu_base->hang_detected = 1;
    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

    delta = ktime_sub(now, entry_time);
    if ((unsigned int)delta > cpu_base->max_hang_time)
        cpu_base->max_hang_time = (unsigned int) delta;
    /*
     * Limit the extra delay to a sensible value: back off by the
     * time we just spent in the handler, but by no more than
     * 100ms, so the CPU gets a chance to catch up.
     */
    if (delta > 100 * NSEC_PER_MSEC)
        expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
    else
        expires_next = ktime_add(now, delta);
    tick_program_event(expires_next, 1);
    pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
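To make the back-off at the end of hrtimer_interrupt() concrete: if the handler spent 2ms before giving up, the next event is programmed 2ms out; if it spent 250ms, the delay is capped at 100ms. The helper below is purely illustrative (hang_backoff is not a kernel function); it only isolates that clamp.

/* Illustrative only: mirrors the clamp at the tail of hrtimer_interrupt() */
static ktime_t hang_backoff(ktime_t now, ktime_t entry_time)
{
    ktime_t delta = ktime_sub(now, entry_time);

    /* Delay by the time already spent, but never by more than 100ms */
    if (delta > 100 * NSEC_PER_MSEC)
        return ktime_add_ns(now, 100 * NSEC_PER_MSEC);
    return ktime_add(now, delta);
}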

void __init hrtimers_init(void)
{
    hrtimers_prepare_cpu(smp_processor_id());
    open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}

static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
{
    struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
    unsigned long flags;
    ktime_t now;

    hrtimer_cpu_base_lock_expiry(cpu_base);
    raw_spin_lock_irqsave(&cpu_base->lock, flags);

    now = hrtimer_update_base(cpu_base);
    __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

    cpu_base->softirq_activated = 0;
    hrtimer_update_softirq_timer(cpu_base, true);

    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
    hrtimer_cpu_base_unlock_expiry(cpu_base);
}
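For context, a timer only ends up in this softirq path if it was armed with one of the *_SOFT modes; otherwise its callback runs from hrtimer_interrupt() in hard interrupt context. A minimal usage sketch follows (the my_timer / my_soft_cb names are hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

/* Expiry runs from hrtimer_run_softirq() via HRTIMER_ACTIVE_SOFT */
static enum hrtimer_restart my_soft_cb(struct hrtimer *t)
{
    /* Softirq context: sleeping is still not allowed, but latency
     * requirements are relaxed compared to hard interrupt context. */
    return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
    hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
    my_timer.function = my_soft_cb;
    hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}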

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
                 unsigned long flags, unsigned int active_mask)
{
    struct hrtimer_clock_base *base;
    unsigned int active = cpu_base->active_bases & active_mask;

    for_each_active_base(base, cpu_base, active) {
        struct timerqueue_node *node;
        ktime_t basenow;

        basenow = ktime_add(now, base->offset);

        while ((node = timerqueue_getnext(&base->active))) {
            struct hrtimer *timer;

            timer = container_of(node, struct hrtimer, node);

            /*
             * The immediate goal for using the softexpires is
             * minimizing wakeups, not running timers at the
             * earliest interrupt after their soft expiration.
             * This allows us to avoid using a Priority Search
             * Tree, which can answer a stabbing query for
             * overlapping intervals and instead use the simple
             * BST we already have.
             * We don't add extra wakeups by delaying timers that
             * are right-of a not yet expired timer, because that
             * timer will have to trigger a wakeup anyway.
             */
            if (basenow < hrtimer_get_softexpires_tv64(timer))
                break;

            __run_hrtimer(cpu_base, base, timer, &basenow, flags);
            if (active_mask == HRTIMER_ACTIVE_SOFT)
                hrtimer_sync_wait_running(cpu_base, flags);
        }
    }
}
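The softexpires checked above is the lower edge of the expiry window a caller can request. As a hedged example (arm_with_slack and the values are illustrative): arming a timer with 1ms of slack gives softexpires = now + 10ms and a hard expiry 1ms later, so the loop above may run it from any interrupt landing in that window instead of forcing a dedicated wakeup.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void arm_with_slack(struct hrtimer *timer)
{
    /* Fire no earlier than 10ms and no later than 11ms from now; the
     * event device is programmed for the hard expiry, while any earlier
     * interrupt may already batch-expire this timer. */
    hrtimer_start_range_ns(timer, ms_to_ktime(10),
                           NSEC_PER_MSEC, HRTIMER_MODE_REL);
}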

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:    the timer is queued
 *  - callback:    the timer is being run
 *  - post:    the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
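The read side referred to here is hrtimer_active(), which (roughly, paraphrasing mainline, so treat the details as approximate) samples the sequence count around its checks of timer->state and base->running and retries if either the section or the timer's base changed underneath it:

bool hrtimer_active(const struct hrtimer *timer)
{
    struct hrtimer_clock_base *base;
    unsigned int seq;

    do {
        base = READ_ONCE(timer->base);
        seq = raw_read_seqcount_begin(&base->seq);

        /* Active if still queued or currently running its callback */
        if (timer->state != HRTIMER_STATE_INACTIVE ||
            base->running == timer)
            return true;

    /* Retry if __run_hrtimer() crossed a section boundary or the
     * timer migrated to another base while we were looking */
    } while (read_seqcount_retry(&base->seq, seq) ||
             base != READ_ONCE(timer->base));

    return false;
}

With the read side in mind, here is the write side, __run_hrtimer():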

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
              struct hrtimer_clock_base *base,
              struct hrtimer *timer, ktime_t *now,
              unsigned long flags)
{
    enum hrtimer_restart (*fn)(struct hrtimer *);
    int restart;

    lockdep_assert_held(&cpu_base->lock);

    debug_deactivate(timer);
    base->running = timer;

    /*
     * Separate the ->running assignment from the ->state assignment.
     *
     * As with a regular write barrier, this ensures the read side in
     * hrtimer_active() cannot observe base->running == NULL &&
     * timer->state == INACTIVE.
     */
    raw_write_seqcount_barrier(&base->seq);

    __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
    fn = timer->function;

    /*
     * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
     * timer is restarted with a period then it becomes an absolute
     * timer. If it's not restarted it does not matter.
     */
    if (IS_ENABLED(CONFIG_TIME_LOW_RES))
        timer->is_rel = false;

    /*
     * The timer is marked as running in the CPU base, so it is
     * protected against migration to a different CPU even if the lock
     * is dropped.
     */
    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
    trace_hrtimer_expire_entry(timer, now);
    restart = fn(timer);
    trace_hrtimer_expire_exit(timer);
    raw_spin_lock_irq(&cpu_base->lock);

    /*
     * Note: We clear the running state after enqueue_hrtimer and
     * we do not reprogram the event hardware. Happens either in
     * hrtimer_start_range_ns() or in hrtimer_interrupt()
     *
     * Note: Because we dropped the cpu_base->lock above,
     * hrtimer_start_range_ns() can have popped in and enqueued the timer
     * for us already.
     */
    if (restart != HRTIMER_NORESTART &&
        !(timer->state & HRTIMER_STATE_ENQUEUED))
        enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

    /*
     * Separate the ->running assignment from the ->state assignment.
     *
     * As with a regular write barrier, this ensures the read side in
     * hrtimer_active() cannot observe base->running == NULL &&
     * timer->state == INACTIVE.
     */
    raw_write_seqcount_barrier(&base->seq);

    WARN_ON_ONCE(base->running != timer);
    base->running = NULL;
}
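The restart handling near the end of __run_hrtimer() is what makes periodic timers work: the callback advances its own expiry and returns HRTIMER_RESTART, and the enqueue above puts it back into the queue. A sketch of such a callback (my_period_cb is a made-up name):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart my_period_cb(struct hrtimer *timer)
{
    /* Push the expiry forward by 100ms on the timer's clock base;
     * returning HRTIMER_RESTART lets __run_hrtimer() requeue it. */
    hrtimer_forward_now(timer, ms_to_ktime(100));
    return HRTIMER_RESTART;
}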
