send_sig

This post walks the Linux kernel's signal-sending path: validating the signal number, queuing the siginfo, translating si_pid/si_uid across pid and user namespaces, and finally waking a thread so it can dequeue the signal. The call chain is send_sig -> send_sig_info -> do_send_sig_info -> send_signal -> __send_signal -> complete_signal, ending in the scheduler's wakeup path through signal_wake_up and try_to_wake_up.

int
send_sig(int sig, struct task_struct *p, int priv)
{
    return send_sig_info(sig, __si_special(priv), p);
}
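
For reference, __si_special() maps the priv flag onto the two sentinel "siginfo" pointers that send_signal() tests against further down. The definitions below are an approximation of what kernel/signal.c and include/linux/sched/signal.h actually contain:

/* Sentinels, not real siginfo pointers: NOINFO means "sent from user
 * space without info", PRIV means "generated by the kernel itself". */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV   ((struct kernel_siginfo *) 1)

#define __si_special(priv) \
    ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)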

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
    /*
     * Make sure legacy kernel users don't send in bad values
     * (normal paths check this in check_kill_permission).
     */
    if (!valid_signal(sig))
        return -EINVAL;

    return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
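
As a usage sketch (hypothetical, not from this article): a minimal module that looks up a task by PID and delivers SIGTERM through this entry point. The find_vpid()/pid_task() lookup under rcu_read_lock() is the standard pattern, and calling send_sig() there is safe because do_send_sig_info() only takes a spinlock, which does not sleep.

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/sched/signal.h>

static int target_pid = 1;      /* hypothetical demo parameter */
module_param(target_pid, int, 0444);

static int __init sigdemo_init(void)
{
    struct task_struct *p;
    int ret = -ESRCH;

    rcu_read_lock();
    p = pid_task(find_vpid(target_pid), PIDTYPE_PID);
    if (p)
        ret = send_sig(SIGTERM, p, 0);  /* priv=0: treat as a user signal */
    rcu_read_unlock();

    pr_info("sigdemo: send_sig(SIGTERM) to pid %d -> %d\n", target_pid, ret);
    return ret;                         /* load fails if the pid is gone */
}

static void __exit sigdemo_exit(void)
{
}

module_init(sigdemo_init);
module_exit(sigdemo_exit);
MODULE_LICENSE("GPL");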

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
            enum pid_type type)
{
    unsigned long flags;
    int ret = -ESRCH;

    if (lock_task_sighand(p, &flags)) {
        ret = send_signal(sig, info, p, type);
        unlock_task_sighand(p, &flags);
    }

    return ret;
}
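
do_send_sig_info() answers -ESRCH when lock_task_sighand() fails, which happens once the target's sighand has been torn down on exit. The interesting part is __lock_task_sighand(); the sketch below is a condensed approximation of its RCU retry loop in kernel/signal.c:

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
    struct sighand_struct *sighand;

    rcu_read_lock();
    for (;;) {
        sighand = rcu_dereference(tsk->sighand);
        if (unlikely(sighand == NULL))
            break;      /* task already released it: caller gets -ESRCH */

        /*
         * The sighand slab is SLAB_TYPESAFE_BY_RCU, so ->siglock stays
         * a valid spinlock even across a free/reuse; after taking it,
         * re-check that tsk->sighand still points at this object.
         */
        spin_lock_irqsave(&sighand->siglock, *flags);
        if (likely(sighand == rcu_access_pointer(tsk->sighand)))
            break;
        spin_unlock_irqrestore(&sighand->siglock, *flags);
    }
    rcu_read_unlock();

    return sighand;
}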

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
            enum pid_type type)
{
    /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
    bool force = false;

    if (info == SEND_SIG_NOINFO) {
        /* Force if sent from an ancestor pid namespace */
        force = !task_pid_nr_ns(current, task_active_pid_ns(t));
    } else if (info == SEND_SIG_PRIV) {
        /* Don't ignore kernel generated signals */
        force = true;
    } else if (has_si_pid_and_uid(info)) {
        /* SIGKILL and SIGSTOP is special or has ids */
        struct user_namespace *t_user_ns;

        rcu_read_lock();
        t_user_ns = task_cred_xxx(t, user_ns);
        if (current_user_ns() != t_user_ns) {
            kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
            info->si_uid = from_kuid_munged(t_user_ns, uid);
        }
        rcu_read_unlock();

        /* A kernel generated signal? */
        force = (info->si_code == SI_KERNEL);

        /* From an ancestor pid namespace? */
        if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
            info->si_pid = 0;
            force = true;
        }
    }
    return __send_signal(sig, info, t, type, force);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
            enum pid_type type, bool force)
{
    struct sigpending *pending;
    struct sigqueue *q;
    int override_rlimit;
    int ret = 0, result;

    assert_spin_locked(&t->sighand->siglock);

    result = TRACE_SIGNAL_IGNORED;
    if (!prepare_signal(sig, t, force))
        goto ret;

    pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
    /*
     * Short-circuit ignored signals and support queuing
     * exactly one non-rt signal, so that we can get more
     * detailed information about the cause of the signal.
     */
    result = TRACE_SIGNAL_ALREADY_PENDING;
    if (legacy_queue(pending, sig))
        goto ret;

    result = TRACE_SIGNAL_DELIVERED;
    /*
     * Skip useless siginfo allocation for SIGKILL and kernel threads.
     */
    if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
        goto out_set;

    /*
     * Real-time signals must be queued if sent by sigqueue, or
     * some other real-time mechanism.  It is implementation
     * defined whether kill() does so.  We attempt to do so, on
     * the principle of least surprise, but since kill is not
     * allowed to fail with EAGAIN when low on memory we just
     * make sure at least one signal gets delivered and don't
     * pass on the info struct.
     */
    if (sig < SIGRTMIN)
        override_rlimit = (is_si_special(info) || info->si_code >= 0);
    else
        override_rlimit = 0;

    q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
    if (q) {
        list_add_tail(&q->list, &pending->list);
        switch ((unsigned long) info) {
        case (unsigned long) SEND_SIG_NOINFO:
            clear_siginfo(&q->info);
            q->info.si_signo = sig;
            q->info.si_errno = 0;
            q->info.si_code = SI_USER;
            q->info.si_pid = task_tgid_nr_ns(current,
                            task_active_pid_ns(t));
            rcu_read_lock();
            q->info.si_uid =
                from_kuid_munged(task_cred_xxx(t, user_ns),
                         current_uid());
            rcu_read_unlock();
            break;
        case (unsigned long) SEND_SIG_PRIV:
            clear_siginfo(&q->info);
            q->info.si_signo = sig;
            q->info.si_errno = 0;
            q->info.si_code = SI_KERNEL;
            q->info.si_pid = 0;
            q->info.si_uid = 0;
            break;
        default:
            copy_siginfo(&q->info, info);
            break;
        }
    } else if (!is_si_special(info) &&
           sig >= SIGRTMIN && info->si_code != SI_USER) {
        /*
         * Queue overflow, abort.  We may abort if the
         * signal was rt and sent by user using something
         * other than kill().
         */
        result = TRACE_SIGNAL_OVERFLOW_FAIL;
        ret = -EAGAIN;
        goto ret;
    } else {
        /*
         * This is a silent loss of information.  We still
         * send the signal, but the *info bits are lost.
         */
        result = TRACE_SIGNAL_LOSE_INFO;
    }

out_set:
    signalfd_notify(t, sig);
    sigaddset(&pending->signal, sig);

    /* Let multiprocess signals appear after on-going forks */
    if (type > PIDTYPE_TGID) {
        struct multiprocess_signals *delayed;
        hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
            sigset_t *signal = &delayed->signal;
            /* Can't queue both a stop and a continue signal */
            if (sig == SIGCONT)
                sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
            else if (sig_kernel_stop(sig))
                sigdelset(signal, SIGCONT);
            sigaddset(signal, sig);
        }
    }

    complete_signal(sig, t, type);
ret:
    trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
    return ret;
}
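
Two helpers referenced above are worth seeing; the versions below are close to what kernel/signal.c defines. is_si_special() recognizes the sentinel pointers from __si_special(), and legacy_queue() implements the classic-signal collapsing rule that the comment near the top of __send_signal() alludes to:

static inline bool is_si_special(const struct kernel_siginfo *info)
{
    /* True for SEND_SIG_NOINFO (NULL) and SEND_SIG_PRIV ((void *)1). */
    return info <= SEND_SIG_PRIV;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
    /* Non-realtime signals collapse: at most one instance may be pending. */
    return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}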

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
    struct signal_struct *signal = p->signal;
    struct task_struct *t;

    /*
     * Now find a thread we can wake up to take the signal off the queue.
     *
     * If the main thread wants the signal, it gets first crack.
     * Probably the least surprising to the average bear.
     */
    if (wants_signal(sig, p))
        t = p;
    else if ((type == PIDTYPE_PID) || thread_group_empty(p))
        /*
         * There is just one thread and it does not need to be woken.
         * It will dequeue unblocked signals before it runs again.
         */
        return;
    else {
        /*
         * Otherwise try to find a suitable thread.
         */
        t = signal->curr_target;
        while (!wants_signal(sig, t)) {
            t = next_thread(t);
            if (t == signal->curr_target)
                /*
                 * No thread needs to be woken.
                 * Any eligible threads will see
                 * the signal in the queue soon.
                 */
                return;
        }
        signal->curr_target = t;
    }

    /*
     * Found a killable thread.  If the signal will be fatal,
     * then start taking the whole group down immediately.
     */
    if (sig_fatal(p, sig) &&
        !(signal->flags & SIGNAL_GROUP_EXIT) &&
        !sigismember(&t->real_blocked, sig) &&
        (sig == SIGKILL || !p->ptrace)) {
        /*
         * This signal will be fatal to the whole group.
         */
        if (!sig_kernel_coredump(sig)) {
            /*
             * Start a group exit and wake everybody up.
             * This way we don't have other threads
             * running and doing things after a slower
             * thread has the fatal signal pending.
             */
            signal->flags = SIGNAL_GROUP_EXIT;
            signal->group_exit_code = sig;
            signal->group_stop_count = 0;
            t = p;
            do {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
            } while_each_thread(p, t);
            return;
        }
    }

    /*
     * The signal is already in the shared-pending queue.
     * Tell the chosen thread to wake up and dequeue it.
     */
    signal_wake_up(t, sig == SIGKILL);
    return;
}
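
complete_signal() leans on two predicates. Approximate definitions, from kernel/signal.c and include/linux/signal.h respectively: wants_signal() asks whether a given thread can take the signal right now, and sig_fatal() asks whether the default disposition would kill the process:

static inline int wants_signal(int sig, struct task_struct *p)
{
    if (sigismember(&p->blocked, sig))
        return 0;                   /* thread has the signal blocked */
    if (p->flags & PF_EXITING)
        return 0;                   /* thread is on its way out */
    if (sig == SIGKILL)
        return 1;                   /* SIGKILL cannot be refused */
    if (task_is_stopped_or_traced(p))
        return 0;
    return task_curr(p) || !signal_pending(p);
}

/* Fatal: default action is neither ignore nor stop, and no handler is installed. */
#define sig_fatal(t, signr) \
    (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
     (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)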

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
    signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
    set_tsk_thread_flag(t, TIF_SIGPENDING);
    /*
     * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
     * case. We don't check t->state here because there is a race with it
     * executing another processor and just now entering stopped state.
     * By using wake_up_state, we ensure the process will wake up and
     * handle its death signal.
     */
    if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
        kick_process(t);
}
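
The state argument is built from the task-state bits in include/linux/sched.h (values approximate). They explain the TASK_WAKEKILL trick: passing state | TASK_INTERRUPTIBLE always matches interruptible sleepers, and when resume is set it also matches stopped, traced, and killable tasks, because those composite states include the TASK_WAKEKILL bit:

#define TASK_RUNNING            0x0000
#define TASK_INTERRUPTIBLE      0x0001
#define TASK_UNINTERRUPTIBLE    0x0002
#define __TASK_STOPPED          0x0004
#define __TASK_TRACED           0x0008
#define TASK_WAKEKILL           0x0100

/* The composite states all carry TASK_WAKEKILL, so a "resume" wakeup
 * (state == TASK_WAKEKILL | TASK_INTERRUPTIBLE) matches every one. */
#define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED            (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED             (TASK_WAKEKILL | __TASK_TRACED)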

int wake_up_state(struct task_struct *p, unsigned int state)
{
    return try_to_wake_up(p, state, 0);
}


/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * If (@state & @p->state) @p->state = TASK_RUNNING.
 *
 * If the task was not queued/runnable, also place it back on a runqueue.
 *
 * Atomic against schedule() which would dequeue a task, also see
 * set_current_state().
 *
 * This function executes a full memory barrier before accessing the task
 * state; see set_current_state().
 *
 * Return: %true if @p->state changes (an actual wakeup was done),
 *       %false otherwise.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
    unsigned long flags;
    int cpu, success = 0;

    preempt_disable();
    if (p == current) {
        /*
         * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
         * == smp_processor_id()'. Together this means we can special
         * case the whole 'p->on_rq && ttwu_remote()' case below
         * without taking any locks.
         *
         * In particular:
         *  - we rely on Program-Order guarantees for all the ordering,
         *  - we're serialized against set_special_state() by virtue of
         *    it disabling IRQs (this allows not taking ->pi_lock).
         */
        if (!(p->state & state))
            goto out;

        success = 1;
        cpu = task_cpu(p);
        trace_sched_waking(p);
        p->state = TASK_RUNNING;
        trace_sched_wakeup(p);
        goto out;
    }

    /*
     * If we are going to wake up a thread waiting for CONDITION we
     * need to ensure that CONDITION=1 done by the caller can not be
     * reordered with p->state check below. This pairs with mb() in
     * set_current_state() the waiting thread does.
     */
    raw_spin_lock_irqsave(&p->pi_lock, flags);
    smp_mb__after_spinlock();
    if (!(p->state & state))
        goto unlock;

    trace_sched_waking(p);

    /* We're going to change ->state: */
    success = 1;
    cpu = task_cpu(p);

    /*
     * Ensure we load p->on_rq _after_ p->state, otherwise it would
     * be possible to, falsely, observe p->on_rq == 0 and get stuck
     * in smp_cond_load_acquire() below.
     *
     * sched_ttwu_pending()            try_to_wake_up()
     *   STORE p->on_rq = 1              LOAD p->state
     *   UNLOCK rq->lock
     *
     * __schedule() (switch to task 'p')
     *   LOCK rq->lock              smp_rmb();
     *   smp_mb__after_spinlock();
     *   UNLOCK rq->lock
     *
     * [task p]
     *   STORE p->state = UNINTERRUPTIBLE      LOAD p->on_rq
     *
     * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
     * __schedule().  See the comment for smp_mb__after_spinlock().
     */
    smp_rmb();
    if (p->on_rq && ttwu_remote(p, wake_flags))
        goto unlock;

#ifdef CONFIG_SMP
    /*
     * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
     * possible to, falsely, observe p->on_cpu == 0.
     *
     * One must be running (->on_cpu == 1) in order to remove oneself
     * from the runqueue.
     *
     * __schedule() (switch to task 'p')    try_to_wake_up()
     *   STORE p->on_cpu = 1          LOAD p->on_rq
     *   UNLOCK rq->lock
     *
     * __schedule() (put 'p' to sleep)
     *   LOCK rq->lock              smp_rmb();
     *   smp_mb__after_spinlock();
     *   STORE p->on_rq = 0              LOAD p->on_cpu
     *
     * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
     * __schedule().  See the comment for smp_mb__after_spinlock().
     */
    smp_rmb();

    /*
     * If the owning (remote) CPU is still in the middle of schedule() with
     * this task as prev, wait until its done referencing the task.
     *
     * Pairs with the smp_store_release() in finish_task().
     *
     * This ensures that tasks getting woken will be fully ordered against
     * their previous state and preserve Program Order.
     */
    smp_cond_load_acquire(&p->on_cpu, !VAL);

    p->sched_contributes_to_load = !!task_contributes_to_load(p);
    p->state = TASK_WAKING;

    if (p->in_iowait) {
        delayacct_blkio_end(p);
        atomic_dec(&task_rq(p)->nr_iowait);
    }

    cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
    if (task_cpu(p) != cpu) {
        wake_flags |= WF_MIGRATED;
        psi_ttwu_dequeue(p);
        set_task_cpu(p, cpu);
    }

#else /* CONFIG_SMP */

    if (p->in_iowait) {
        delayacct_blkio_end(p);
        atomic_dec(&task_rq(p)->nr_iowait);
    }

#endif /* CONFIG_SMP */

    ttwu_queue(p, cpu, wake_flags);
unlock:
    raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out:
    if (success)
        ttwu_stat(p, cpu, wake_flags);
    preempt_enable();

    return success;
}
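
The barrier commentary inside try_to_wake_up() is easiest to read against the canonical sleeper/waker pairing it protects. A hypothetical sketch, assuming some shared condition flag and a task pointer p:

/* Sleeper: set_current_state() issues a full barrier after storing
 * the task state, so the condition load below cannot be reordered
 * before that store. */
for (;;) {
    set_current_state(TASK_INTERRUPTIBLE);
    if (READ_ONCE(condition))
        break;
    schedule();
}
__set_current_state(TASK_RUNNING);

/* Waker: the condition store must be visible before the wakeup reads
 * p->state; the pi_lock + smp_mb__after_spinlock() in try_to_wake_up()
 * provide the matching ordering on this side. */
WRITE_ONCE(condition, 1);
wake_up_process(p);     /* == try_to_wake_up(p, TASK_NORMAL, 0) */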

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
    struct rq_flags rf;
    struct rq *rq;
    int ret = 0;

    rq = __task_rq_lock(p, &rf);
    if (task_on_rq_queued(p)) {
        /* check_preempt_curr() may use rq clock */
        update_rq_clock(rq);
        ttwu_do_wakeup(rq, p, wake_flags, &rf);
        ret = 1;
    }
    __task_rq_unlock(rq, &rf);

    return ret;
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
               struct rq_flags *rf)
{
    check_preempt_curr(rq, p, wake_flags);
    p->state = TASK_RUNNING;
    trace_sched_wakeup(p);

#ifdef CONFIG_SMP
    if (p->sched_class->task_woken) {
        /*
         * Our task @p is fully woken up and running; so its safe to
         * drop the rq->lock, hereafter rq is only used for statistics.
         */
        rq_unpin_lock(rq, rf);
        p->sched_class->task_woken(rq, p);
        rq_repin_lock(rq, rf);
    }

    if (rq->idle_stamp) {
        u64 delta = rq_clock(rq) - rq->idle_stamp;
        u64 max = 2*rq->max_idle_balance_cost;

        update_avg(&rq->avg_idle, delta);

        if (rq->avg_idle > max)
            rq->avg_idle = max;

        rq->idle_stamp = 0;
    }
#endif
}
