/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009, 2010 Daniel Borkmann.
 * Copyright 2010 Marek Polacek.
 * Subject to the GPL, version 2.
 */

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "iosched.h"
#include "die.h"

#define IOPRIO_CLASS_SHIFT	13

enum {
	ioprio_class_none,
	ioprio_class_rt,
	ioprio_class_be,
	ioprio_class_idle,
};

enum {
	ioprio_who_process = 1,
	ioprio_who_pgrp,
	ioprio_who_user,
};

static const char *const to_prio[] = {
	"none",
	"realtime",
	"best-effort",
	"idle",
};

static inline int ioprio_set(int which, int who, int ioprio)
{
	return syscall(SYS_ioprio_set, which, who, ioprio);
}

static inline int ioprio_get(int which, int who)
{
	return syscall(SYS_ioprio_get, which, who);
}

static void ioprio_setpid(pid_t pid, int ioprio, int ioclass)
{
	int ret = ioprio_set(ioprio_who_process, pid,
			     ioprio | ioclass << IOPRIO_CLASS_SHIFT);
	if (ret < 0)
		panic("Failed to set io prio for pid!\n");
}

void ioprio_print(void)
{
	int ioprio = ioprio_get(ioprio_who_process, getpid());
	if (ioprio < 0)
		panic("Failed to fetch io prio for pid!\n");
	else {
		int ioclass = ioprio >> IOPRIO_CLASS_SHIFT;
		if (ioclass != ioprio_class_idle) {
			ioprio &= 0xff;
			printf("%s: prio %d\n", to_prio[ioclass], ioprio);
		} else
			printf("%s\n", to_prio[ioclass]);
	}
}

void set_ioprio_rt(void)
{
	ioprio_setpid(getpid(), 4, ioprio_class_rt);
}

void set_ioprio_be(void)
{
	ioprio_setpid(getpid(), 4, ioprio_class_be);
}
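For illustration, here is a minimal usage sketch of this module. The main() below is hypothetical and not part of netsniff-ng; it assumes iosched.h declares ioprio_print(), set_ioprio_be() and set_ioprio_rt() as defined above, and that the program is linked against the rest of the netsniff-ng tree (panic() comes from die.h).

/* Hypothetical usage sketch -- not part of the original iosched.c. */
#include "iosched.h"

int main(void)
{
	/* Print the current I/O class and priority of this process. */
	ioprio_print();

	/* Switch to the best-effort class, priority level 4
	 * (the level hard-coded by set_ioprio_be() above). */
	set_ioprio_be();

	/* Confirm the new setting. */
	ioprio_print();

	return 0;
}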
author     Peter Zijlstra <peterz@infradead.org>  2016-06-16 18:51:48 +0200
committer  Ingo Molnar <mingo@kernel.org>         2016-06-27 12:17:50 +0200
commit     e210bffd39d01b649c94b820c28ff112673266dd (patch)
tree       db99be48b206fb9fd2f43c65499a7433ddd54d54
parent     630741fb60ac4e286f5396403c0d864d924c02bc (diff)
sched/fair: Fix and optimize the fork() path
The task_fork_fair() callback already calls __set_task_cpu() and takes rq->lock.

If we move the sched_class::task_fork callback in sched_fork() under the existing p->pi_lock, right after its set_task_cpu() call, we can avoid doing two such calls and omit the IRQ disabling on the rq->lock.

Change to __set_task_cpu() to skip the migration bits, this is a new task, not a migration. Similarly, make wake_up_new_task() use __set_task_cpu() for the same reason, the task hasn't actually migrated as it hasn't ever ran.

This cures the problem of calling migrate_task_rq_fair(), which does remove_entity_from_load_avg() on tasks that have never been added to the load avg to begin with. This bug would result in transiently messed up load_avg values, averaged out after a few dozen milliseconds. This is probably the reason why this bug was not found for such a long time.

Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
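As a rough illustration of the reworked path, the sketch below shows what sched_fork() looks like after this change: the class's task_fork callback runs under the already-held p->pi_lock and the CPU is assigned with __set_task_cpu(). This is a hand-written sketch paraphrased from the commit message above, not the verbatim kernel diff; most of the function is elided and details may differ from the actual tree.

/*
 * Illustrative sketch only -- paraphrased from the commit message,
 * not the literal patch; most of sched_fork() is elided.
 */
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	/* ... usual scheduler state initialisation elided ... */

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/*
	 * This is a new task, not a migration, so __set_task_cpu()
	 * suffices and skips the migration bookkeeping that
	 * set_task_cpu() would do.
	 */
	__set_task_cpu(p, cpu);
	if (p->sched_class->task_fork)
		/*
		 * Runs under p->pi_lock; task_fork_fair() takes
		 * rq->lock itself, so no separate IRQ-disabled
		 * rq->lock section is needed here.
		 */
		p->sched_class->task_fork(p);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	/* ... */
	put_cpu();
	return 0;
}

wake_up_new_task() gets the same treatment, assigning the chosen CPU with __set_task_cpu(), since the task has never run and therefore has never migrated.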