#include "sched.h" /* * idle-task scheduling class. * * (NOTE: these are not related to SCHED_IDLE tasks which are * handled in sched/fair.c) */ #ifdef CONFIG_SMP static int select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) { return task_cpu(p); /* IDLE tasks as never migrated */ } #endif /* CONFIG_SMP */ /* * Idle tasks are unconditionally rescheduled: */ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) { resched_curr(rq); } static struct task_struct * pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) { put_prev_task(rq, prev); update_idle_core(rq); schedstat_inc(rq->sched_goidle); return rq->idle; } /* * It is not legal to sleep in the idle task - print a warning * message if some code attempts to do it: */ static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) { raw_spin_unlock_irq(&rq->lock); printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); raw_spin_lock_irq(&rq->lock); } static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { rq_last_tick_reset(rq); } static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) { } static void set_curr_task_idle(struct rq *rq) { } static void switched_to_idle(struct rq *rq, struct task_struct *p) { BUG(); } static void prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio) { BUG(); } static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task) { return 0; } static void update_curr_idle(struct rq *rq) { } /* * Simple, special scheduling class for the per-CPU idle tasks: */ const struct sched_class idle_sched_class = { /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ /* dequeue is not valid, we print a debug message there: */ .dequeue_task = dequeue_task_idle, .check_preempt_curr = check_preempt_curr_idle, .pick_next_task = pick_next_task_idle, .put_prev_task = put_prev_task_idle, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_idle, .set_cpus_allowed = set_cpus_allowed_common, #endif .set_curr_task = set_curr_task_idle, .task_tick = task_tick_idle, .get_rr_interval = get_rr_interval_idle, .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, .update_curr = update_curr_idle, }; input class='txt' type='search' size='10' name='q' value=''/>
author:    Thomas Gleixner <tglx@linutronix.de>, 2017-01-31 09:37:34 +0100
committer: Thomas Gleixner <tglx@linutronix.de>, 2017-01-31 21:47:58 +0100
commit:    0becc0ae5b42828785b589f686725ff5bc3b9b25
parent:    24c2503255d35c269b67162c397a1a1c1e02f6ce
x86/mce: Make timer handling more robust
Erik reported that on preproduction hardware a CMCI storm triggers the BUG_ON() in add_timer_on(). The reason is that the per-CPU MCE timer is started by the CMCI logic before the MCE CPU hotplug callback starts the timer with add_timer_on(), so the timer is already queued, which triggers the BUG.

Using add_timer_on() is pretty pointless in this code because the timer is strictly per CPU, initialized as pinned, and all operations which arm the timer happen on the CPU to which the timer belongs. Simplify the whole machinery by using mod_timer() instead of add_timer_on(), which avoids the problem because mod_timer() can handle already-queued timers. Use __start_timer() everywhere so the earliest armed expiry time is preserved.

Reported-by: Erik Veijola <erik.veijola@intel.com>
Tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701310936080.3457@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
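The fix hinges on mod_timer() being safe to call on a timer that is already pending, unlike add_timer_on(), which BUGs in that case. A helper along the lines of the __start_timer() mentioned in the changelog might look like the sketch below; this illustrates the idea described in the commit message (re-arm with mod_timer(), and never push an already-armed expiry further out), it is not the actual x86/mce patch.

/*
 * Sketch of a __start_timer()-style helper as described in the changelog:
 * re-arm the pinned, per-CPU timer with mod_timer(), which copes with an
 * already-queued timer, and keep the earliest armed expiry time.
 * Illustrative only; not the actual arch/x86 MCE code.
 */
static void __start_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	/* Only move the expiry if the timer is idle or this expiry is earlier. */
	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));

	local_irq_restore(flags);
}

Because mod_timer() on an already-pending pinned timer simply updates its expiry, the race between the CMCI storm code and the MCE CPU hotplug callback no longer trips the BUG_ON() in add_timer_on().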