hwspin_lock_timeout — Linux hwspinlock framework: timed lock acquisition and the Qualcomm TCSR mutex driver

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
    return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}

int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                    int mode, unsigned long *flags)
{
    int ret;
    unsigned long expire;

    expire = msecs_to_jiffies(to) + jiffies;

    for (;;) {
        /* Try to take the hwspinlock */
        ret = __hwspin_trylock(hwlock, mode, flags);
        if (ret != -EBUSY)
            break;

        /*
         * The lock is already taken, let's check if the user wants
         * us to try again
         */
        if (time_is_before_eq_jiffies(expire))
            return -ETIMEDOUT;

        /*
         * Allow platform-specific relax handlers to prevent
         * hogging the interconnect (no sleeping, though)
         */
        if (hwlock->bank->ops->relax)
            hwlock->bank->ops->relax(hwlock);
    }

    return ret;
}

/**
 * __hwspin_trylock() - single non-spinning attempt to take an hwspinlock
 * @hwlock: the hwspinlock to take; must not be NULL
 * @mode: HWLOCK_IRQSTATE, HWLOCK_IRQ, HWLOCK_RAW, or 0 for a plain spinlock
 * @flags: where to save IRQ flags; must be non-NULL for HWLOCK_IRQSTATE
 *
 * First takes a local software spinlock (variant chosen by @mode; skipped
 * entirely for HWLOCK_RAW), then attempts the hardware lock via the bank's
 * trylock() op.  If the hardware lock is busy, the local lock is unwound
 * with the matching unlock variant.
 *
 * Returns 0 when both locks were taken, -EBUSY when either was contended.
 * On success (for modes other than HWLOCK_RAW) preemption — and, depending
 * on @mode, interrupts — remain disabled until the matching unlock.
 *
 * NOTE(review): ops->trylock() is used as a boolean below — a negative
 * errno from an implementation would be mistaken for success; confirm all
 * bank implementations return only 0 (busy) or 1 (taken).
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
    int ret;

    BUG_ON(!hwlock);
    BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

    /*
     * This spin_lock{_irq, _irqsave} serves three purposes:
     *
     * 1. Disable preemption, in order to minimize the period of time
     *    in which the hwspinlock is taken. This is important in order
     *    to minimize the possible polling on the hardware interconnect
     *    by a remote user of this lock.
     * 2. Make the hwspinlock SMP-safe (so we can take it from
     *    additional contexts on the local host).
     * 3. Ensure that in_atomic/might_sleep checks catch potential
     *    problems with hwspinlock usage (e.g. scheduler checks like
     *    'scheduling while atomic' etc.)
     */
    switch (mode) {
    case HWLOCK_IRQSTATE:
        ret = spin_trylock_irqsave(&hwlock->lock, *flags);
        break;
    case HWLOCK_IRQ:
        ret = spin_trylock_irq(&hwlock->lock);
        break;
    case HWLOCK_RAW:
        /* raw mode: caller manages preemption/IRQs, no local lock taken */
        ret = 1;
        break;
    default:
        ret = spin_trylock(&hwlock->lock);
        break;
    }

    /* is lock already taken by another context on the local cpu ? */
    if (!ret)
        return -EBUSY;

    /* try to take the hwspinlock device */
    ret = hwlock->bank->ops->trylock(hwlock);

    /* if hwlock is already taken, undo spin_trylock_* and exit */
    if (!ret) {
        switch (mode) {
        case HWLOCK_IRQSTATE:
            spin_unlock_irqrestore(&hwlock->lock, *flags);
            break;
        case HWLOCK_IRQ:
            spin_unlock_irq(&hwlock->lock);
            break;
        case HWLOCK_RAW:
            /* Nothing to do */
            break;
        default:
            spin_unlock(&hwlock->lock);
            break;
        }

        return -EBUSY;
    }

    /*
     * We can be sure the other core's memory operations
     * are observable to us only _after_ we successfully take
     * the hwspinlock, and we must make sure that subsequent memory
     * operations (both reads and writes) will not be reordered before
     * we actually took the hwspinlock.
     *
     * Note: the implicit memory barrier of the spinlock above is too
     * early, so we need this additional explicit memory barrier.
     */
    mb();

    return 0;
}

ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
                   0, QCOM_MUTEX_NUM_LOCKS);

/* Hardware lock callbacks registered with the hwspinlock core */
static const struct hwspinlock_ops qcom_hwspinlock_ops = {
    .unlock  = qcom_hwspinlock_unlock,
    .trylock = qcom_hwspinlock_trylock,
};

/*
 * qcom_hwspinlock_trylock() - single attempt to grab a QCOM TCSR mutex
 * @lock: hwspinlock whose priv is the regmap field of the mutex register
 *
 * Writes our processor ID into the mutex register and reads it back; the
 * hardware retains the first writer's ID, so reading our own ID back means
 * we now own the lock.  Returns 1 on success, 0 when another owner holds
 * it, or a negative errno on regmap I/O failure.
 */
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
{
    struct regmap_field *field = lock->priv;
    u32 owner;
    int err;

    err = regmap_field_write(field, QCOM_MUTEX_APPS_PROC_ID);
    if (err)
        return err;

    err = regmap_field_read(field, &owner);
    if (err)
        return err;

    return owner == QCOM_MUTEX_APPS_PROC_ID;
}

/*
 * qcom_hwspinlock_unlock() - release a QCOM TCSR mutex
 * @lock: hwspinlock whose priv is the regmap field of the mutex register
 *
 * Reads the current owner to warn when the lock isn't ours, then clears
 * the mutex register (writing 0 releases it).  Errors are only logged —
 * the hwspinlock unlock op has no way to report failure.
 */
static void qcom_hwspinlock_unlock(struct hwspinlock *lock)
{
    struct regmap_field *field = lock->priv;
    u32 owner;

    if (regmap_field_read(field, &owner)) {
        pr_err("%s: unable to query spinlock owner\n", __func__);
        return;
    }

    /* warn but still release, matching the original best-effort behavior */
    if (owner != QCOM_MUTEX_APPS_PROC_ID) {
        pr_err("%s: spinlock not owned by us (actual owner is %d)\n",
                __func__, owner);
    }

    if (regmap_field_write(field, 0))
        pr_err("%s: failed to unlock spinlock\n", __func__);
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值