From: Ingo Molnar Date: Wed, 14 Nov 2012 11:18:29 +0000 (+0100) Subject: sched: Implement NUMA scanning backoff X-Git-Tag: next-20121205~43^2~2^2~4 X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=634a74e5803ae9f40bf18f9c115e76a4b969183d;p=karo-tx-linux.git sched: Implement NUMA scanning backoff Back off slowly from scanning, up to sysctl_sched_numa_scan_period_max (1.6 seconds). Scan faster again if we were forced to switch to another node. This makes sure that workloads in equilibrium don't get scanned as often as workloads that are still converging. Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Andrew Morton Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Mel Gorman Cc: Hugh Dickins Signed-off-by: Ingo Molnar --- diff --git a/kernel/sched/core.c b/kernel/sched/core.c index af0602fc70d7..ec3cc741a0b7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6024,6 +6024,12 @@ void sched_setnuma(struct task_struct *p, int node, int shared) if (on_rq) enqueue_task(rq, p, 0); task_rq_unlock(rq, p, &flags); + + /* + * Reset the scanning period. If the task converges + * on this node then we'll back off again: + */ + p->numa_scan_period = sysctl_sched_numa_scan_period_min; } #endif /* CONFIG_NUMA_BALANCING */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8f0e6ba6bfce..59fea2ec21e3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -865,8 +865,10 @@ static void task_numa_placement(struct task_struct *p) } } - if (max_node != p->numa_max_node) + if (max_node != p->numa_max_node) { sched_setnuma(p, max_node, task_numa_shared(p)); + goto out_backoff; + } p->numa_migrate_seq++; if (sched_feat(NUMA_SETTLE) && @@ -882,7 +884,11 @@ static void task_numa_placement(struct task_struct *p) if (shared != task_numa_shared(p)) { sched_setnuma(p, p->numa_max_node, shared); p->numa_migrate_seq = 0; + goto out_backoff; } + return; +out_backoff: + p->numa_scan_period = min(p->numa_scan_period * 2, sysctl_sched_numa_scan_period_max); } /*