6.3 Updating the Time and Date

//kernel/timer.c
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
    unsigned long ticks;

    ticks = jiffies - wall_jiffies;
    if (ticks) {
        wall_jiffies += ticks;
        update_wall_time(ticks);
    }
    calc_load(ticks);
}
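As the comment above notes, xtime (the wall-clock time) is protected by the xtime_lock seqlock: the timer interrupt takes it for writing, while readers such as do_gettimeofday() simply retry until they observe a consistent snapshot. A minimal sketch of the reader side (the helper name is illustrative, not an actual kernel function):

struct timespec read_xtime_snapshot(void)
{
    struct timespec ts;
    unsigned long seq;

    do {
        /* begin a lockless read attempt */
        seq = read_seqbegin(&xtime_lock);
        ts = xtime;             /* copy seconds and nanoseconds */
        /* retry if a writer (the timer interrupt) ran meanwhile */
    } while (read_seqretry(&xtime_lock, seq));
    return ts;
}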
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks;
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
    do {
        ticks--;
        update_wall_time_one_tick();
        if (xtime.tv_nsec >= 1000000000) {
            xtime.tv_nsec -= 1000000000;
            xtime.tv_sec++;
            second_overflow();
        }
    } while (ticks);
}
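The update_wall_time_one_tick() helper invoked in the loop (not reproduced here) advances xtime.tv_nsec by roughly tick_nsec nanoseconds (about 1,000,000,000/HZ), after folding in any correction requested through adjtimex(). A deliberately simplified sketch that ignores those NTP adjustments:

/*
 * Simplified sketch only: the real routine also applies the
 * time_adjust/time_phase corrections set up by adjtimex().
 */
static inline void update_wall_time_one_tick(void)
{
    xtime.tv_nsec += tick_nsec;     /* about 1000000000 / HZ ns per tick */
}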
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
    unsigned long active_tasks; /* fixed-point */
    static int count = LOAD_FREQ;

    count -= ticks;
    if (count < 0) {
        count += LOAD_FREQ;
        active_tasks = count_active_tasks();
        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
    }
}
6.4 Updating System Statistics

The kernel, among its other time-related duties, must periodically collect various data used for:
· Checking the CPU resource limit of the running processes
· Updating statistics about the local CPU workload
· Computing the average system load
· Profiling the kernel code
· Updating Local CPU Statistics

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
    struct task_struct *p = current;
    int cpu = smp_processor_id();

    /* Note: this timer irq context must be accounted for as well. */
    if (user_tick)
        account_user_time(p, jiffies_to_cputime(1));
    else
        account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
    run_local_timers();
    if (rcu_pending(cpu))
        rcu_check_callbacks(cpu, user_tick);
    scheduler_tick();
}
This function performs the following steps:
1. Checks how long the current process has been running: depending on whether it was executing in User Mode or in Kernel Mode when the timer interrupt occurred, invokes either account_user_time() or account_system_time() (a condensed sketch of account_user_time() follows this list).
2. Invokes run_local_timers(), which calls raise_softirq() to activate the TIMER_SOFTIRQ softirq on the local CPU.
3. If some old version of an RCU-protected data structure has to be reclaimed, checks whether the local CPU has gone through a quiescent state and invokes tasklet_schedule() to activate the rcu_tasklet tasklet of the local CPU.
4. Invokes the scheduler_tick() function, which decreases the time slice counter of the current process and checks whether its quantum is exhausted.
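For reference, here is a condensed sketch of account_user_time() based on the 2.6 sources (details vary between kernel versions): it charges one tick's worth of CPU time to the process descriptor's utime field and to the per-CPU statistics in kstat_this_cpu, distinguishing niced processes.

void account_user_time(struct task_struct *p, cputime_t cputime)
{
    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
    cputime64_t tmp = cputime_to_cputime64(cputime);

    /* charge the tick to the task's user-mode time */
    p->utime = cputime_add(p->utime, cputime);

    /* charge it to the per-CPU statistics as well */
    if (TASK_NICE(p) > 0)
        cpustat->nice = cputime64_add(cpustat->nice, tmp);
    else
        cpustat->user = cputime64_add(cpustat->user, tmp);
}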
· Keeping Track of System Load

At every tick, update_times() invokes the calc_load() function; once every LOAD_FREQ ticks (five seconds), calc_load() counts the number of processes in the TASK_RUNNING or TASK_UNINTERRUPTIBLE state and uses this number to update the average system load stored in the avenrun[] array.
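The three avenrun[] entries are exponentially weighted moving averages over 1, 5, and 15 minutes, kept in 11-bit fixed-point arithmetic. The constants and the CALC_LOAD() macro used above are defined in <linux/sched.h> roughly as follows:

#define FSHIFT      11              /* bits of fixed-point precision */
#define FIXED_1     (1 << FSHIFT)   /* 1.0 in fixed-point */
#define LOAD_FREQ   (5*HZ)          /* recompute every 5 seconds */
#define EXP_1       1884            /* fixed-point 1/exp(5sec/1min) */
#define EXP_5       2014            /* fixed-point 1/exp(5sec/5min) */
#define EXP_15      2037            /* fixed-point 1/exp(5sec/15min) */

#define CALC_LOAD(load, exp, n)     \
    load *= exp;                    \
    load += n*(FIXED_1 - exp);      \
    load >>= FSHIFT;

Each update therefore computes load = load*exp + n*(1 - exp) in fixed point, an exponential decay toward the current number of active tasks n.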
· Profiling the Kernel Code

void profile_tick(int type, struct pt_regs *regs)
{
    if (type == CPU_PROFILING && timer_hook)
        timer_hook(regs);
    if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
        profile_hit(type, (void *)profile_pc(regs));
}

The profile_tick() function collects the data for the code profiler. It is invoked either by the do_timer_interrupt() function in uniprocessor systems or by the smp_local_timer_interrupt() function in multiprocessor systems.
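When profiling is enabled (the profile= boot option), profile_hit() records where the interrupted code was executing by incrementing a counter in a buffer indexed by the program counter. A sketch of the idea, close to the 2.6 implementation in kernel/profile.c (the exact code may differ by version):

void profile_hit(int type, void *__pc)
{
    unsigned long pc;

    if (prof_on != type || !prof_buffer)
        return;
    /* turn the program counter into a bucket index, one bucket
     * per 2^prof_shift bytes of kernel text starting at _stext */
    pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
    atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}

The readprofile utility can later read /proc/profile and map the hottest buckets back to kernel functions.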
· Checking the NMI Watchdogs

static void default_do_nmi(struct pt_regs *regs)
{
    unsigned char reason = 0;

    /* Only the BSP gets external NMIs from the system. */
    if (!smp_processor_id())
        reason = get_nmi_reason();

    if (!(reason & 0xc0)) {
        if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
                                == NOTIFY_STOP)
            return;
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Ok, so this is none of the documented NMI sources,
         * so it must be the NMI watchdog.
         */
        if (nmi_watchdog) {
            nmi_watchdog_tick(regs);
            return;
        }
#endif
        unknown_nmi_error(reason, regs);
        return;
    }
    if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
        return;
    if (reason & 0x80)
        mem_parity_error(reason, regs);
    if (reason & 0x40)
        io_check_error(reason, regs);
    /*
     * Reassert NMI in case it became active meanwhile
     * as it's edge-triggered.
     */
    reassert_nmi();
}
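The nmi_watchdog_tick() function invoked above implements the lockup detector: on every NMI it checks whether the local APIC timer interrupt counter has advanced since the previous NMI; if it has not for about five seconds' worth of NMIs, the CPU is assumed to be stuck with interrupts disabled and the kernel is brought down with a LOCKUP message. A condensed sketch of the check (the rearming of the watchdog's performance counter done by the real i386 routine is omitted; last_irq_sums and alert_counter are per-CPU arrays in the i386 NMI code):

void nmi_watchdog_tick(struct pt_regs *regs)
{
    int cpu = smp_processor_id();
    int sum = per_cpu(irq_stat, cpu).apic_timer_irqs;  /* local timer IRQ count */

    if (last_irq_sums[cpu] == sum) {
        /* no timer interrupt serviced since the previous NMI:
         * the CPU may be looping with interrupts disabled */
        if (++alert_counter[cpu] == 5*nmi_hz)
            die_nmi(regs, "NMI Watchdog detected LOCKUP");
    } else {
        last_irq_sums[cpu] = sum;   /* progress was made, start over */
        alert_counter[cpu] = 0;
    }
}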