/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
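
/*
 * For reference, this flag is consumed by place_entity() in fair.c when
 * a task wakes from sleep; a simplified, paraphrased sketch (not the
 * verbatim kernel code):
 *
 *	unsigned long thresh = sysctl_sched_latency;
 *
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		thresh >>= 1;
 *
 *	vruntime -= thresh;
 *
 * Halving the credit lets a sleeper preempt soon after waking without
 * letting it claw back its entire sleep time.
 */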

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks
 */
SCHED_FEAT(START_DEBIT, true)
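
/*
 * For reference, a paraphrased sketch of how place_entity() in fair.c
 * applies the debit when a task is first enqueued:
 *
 *	if (initial && sched_feat(START_DEBIT))
 *		vruntime += sched_vslice(cfs_rq, se);
 *
 * The new task starts one vslice past min_vruntime, i.e. at the end of
 * the current scheduling period rather than ahead of running tasks.
 */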

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
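
/*
 * For reference, both buddy hints above are resolved in
 * pick_next_entity() in fair.c; a simplified, paraphrased sketch:
 *
 *	struct sched_entity *se = __pick_first_entity(cfs_rq);
 *
 *	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
 *		se = cfs_rq->last;
 *
 *	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
 *		se = cfs_rq->next;
 *
 * The "< 1" check means a buddy is only preferred while that is not
 * too unfair to the leftmost entity in the rbtree.
 */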

/*
 * Consider buddies to be cache hot; this decreases the likelihood
 * of a cache buddy being migrated away, which increases cache
 * locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
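
/*
 * For reference, task_hot() in fair.c consults this flag when the load
 * balancer considers migrating a task; a paraphrased sketch:
 *
 *	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 *	    (&p->se == cfs_rq_of(&p->se)->next ||
 *	     &p->se == cfs_rq_of(&p->se)->last))
 *		return 1;
 *
 * i.e. a next/last buddy is reported as cache hot and left in place.
 */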

/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
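
/*
 * For reference, check_preempt_wakeup() in fair.c bails out early when
 * this flag is cleared (paraphrased):
 *
 *	if (!sched_feat(WAKEUP_PREEMPTION))
 *		return;
 *
 * so a waking task then has to wait for the current task's slice to
 * run out.
 */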

/*
 * Use arch-dependent CPU power functions.
 */
SCHED_FEAT(ARCH_POWER, true)
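
/*
 * For reference, update_cpu_power() in fair.c picks between the arch
 * hook and the generic default based on this flag; a paraphrased
 * sketch:
 *
 *	if (sched_feat(ARCH_POWER))
 *		power *= arch_scale_freq_power(sd, cpu);
 *	else
 *		power *= default_scale_freq_power(sd, cpu);
 *	power >>= SCHED_POWER_SHIFT;
 */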

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)
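
/*
 * Note on mechanics: this file is an X-macro list and is included more
 * than once with different definitions of SCHED_FEAT(name, enabled).
 * For example, core.c builds the default feature bitmask roughly as
 * follows (paraphrased):
 *
 *	#define SCHED_FEAT(name, enabled)	\
 *		(1UL << __SCHED_FEAT_##name) * enabled |
 *	const_debug unsigned int sysctl_sched_features =
 *	#include "features.h"
 *		0;
 *	#undef SCHED_FEAT
 *
 * With CONFIG_SCHED_DEBUG, features can be toggled at runtime, e.g.:
 *
 *	echo NO_LB_BIAS > /sys/kernel/debug/sched_features
 */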

/*
 * Decrement CPU power based on time not spent running tasks
 */
SCHED_FEAT(NONTASK_POWER, true)
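
/*
 * For reference, update_rq_clock_task() in core.c feeds IRQ and
 * paravirt steal time into the rt_avg estimate, which later scales
 * the CPU's power down (paraphrased):
 *
 *	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
 *		sched_rt_avg_update(rq, irq_delta + steal);
 */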

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
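
/*
 * For reference, ttwu_queue() in core.c takes the remote path only
 * when the waking and target CPUs do not share a cache (paraphrased):
 *
 *	if (sched_feat(TTWU_QUEUE) &&
 *	    !cpus_share_cache(smp_processor_id(), cpu)) {
 *		ttwu_queue_remote(p, cpu);
 *		return;
 *	}
 *
 * The target CPU then processes the wakeup from its scheduler IPI
 * handler instead of the waker taking the remote rq->lock.
 */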

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)

/*
 * Apply the automatic NUMA scheduling policy. Enabled automatically
 * at runtime if running on a NUMA machine. Can be controlled via
 * numa_balancing=. Allow PTE scanning to be forced on UMA machines
 * for debugging the core machinery. The runtime toggle is sketched
 * below.
 */
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)
SCHED_FEAT(NUMA_FORCE, false)
#endif
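
/*
 * For reference, the NUMA feature above is flipped at runtime by
 * set_numabalancing_state() in core.c; a paraphrased sketch of the
 * CONFIG_SCHED_DEBUG variant:
 *
 *	void set_numabalancing_state(bool enabled)
 *	{
 *		if (enabled)
 *			sched_feat_set("NUMA");
 *		else
 *			sched_feat_set("NO_NUMA");
 *	}
 */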