/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Scheduler hook for average runqueue determination
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/math64.h>

static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr);
static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;
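
/*
 * Accounting scheme: each CPU integrates nr_running over time.  Between
 * two updates at times t1 and t2, nr_prod_sum grows by
 * nr_running * (t2 - t1), so a poll computes
 *
 *	avg = 100 * sum_cpu(nr_prod_sum) / (now - last_get_time)
 *
 * Worked example (illustrative numbers only): if one CPU runs 2 tasks
 * for 5 ms and then 3 tasks for 5 ms, nr_prod_sum grows by
 * 2*5 + 3*5 = 25 task-ms over a 10 ms window, and the next poll reports
 * 100 * 25 / 10 = 250, i.e. an average of 2.50 runnable tasks.
 */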

/**
 * sched_get_nr_running_avg - average nr_running and iowait since last poll
 * @avg: average nr_running since the last poll, scaled by 100
 * @iowait_avg: average nr_iowait since the last poll, scaled by 100
 *
 * The averages are multiplied by 100 to give two decimal places of
 * accuracy.  This function must not be called concurrently with itself.
 */
void sched_get_nr_running_avg(int *avg, int *iowait_avg)
{
	int cpu;
	u64 curr_time = sched_clock();
	u64 diff = curr_time - last_get_time;
	u64 tmp_avg = 0, tmp_iowait = 0;

	*avg = 0;
	*iowait_avg = 0;

	if (!diff)
		return;

	last_get_time = curr_time;
	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		tmp_avg += per_cpu(nr_prod_sum, cpu);
		tmp_avg += per_cpu(nr, cpu) *
				(curr_time - per_cpu(last_time, cpu));
		/* accumulate across CPUs; '=' here would drop earlier CPUs' sums */
		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
		tmp_iowait += nr_iowait_cpu(cpu) *
				(curr_time - per_cpu(last_time, cpu));
		per_cpu(last_time, cpu) = curr_time;
		per_cpu(nr_prod_sum, cpu) = 0;
		per_cpu(iowait_prod_sum, cpu) = 0;
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	*avg = (int)div64_u64(tmp_avg * 100, diff);
	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);

	BUG_ON(*avg < 0);
	pr_debug("%s - avg:%d\n", __func__, *avg);
	BUG_ON(*iowait_avg < 0);
	pr_debug("%s - iowait_avg:%d\n", __func__, *iowait_avg);
}
EXPORT_SYMBOL(sched_get_nr_running_avg);
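
/*
 * Illustrative usage sketch (not part of this file): a periodic consumer,
 * e.g. a CPU hotplug or frequency governor, could poll the averages from
 * deferred work.  The work function below is hypothetical; only
 * sched_get_nr_running_avg() is defined here.
 *
 *	static void rq_poll_fn(struct work_struct *work)
 *	{
 *		int avg, iowait_avg;
 *
 *		sched_get_nr_running_avg(&avg, &iowait_avg);
 *		pr_info("run avg %d.%02d, iowait avg %d.%02d\n",
 *			avg / 100, avg % 100,
 *			iowait_avg / 100, iowait_avg % 100);
 *	}
 */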

/**
 * sched_update_nr_prod - fold a CPU's nr_running change into the averages
 * @cpu: id of the CPU whose count is being updated
 * @nr_running: the CPU's nr_running value before this change is applied
 * @inc: true when the count is being incremented, false when decremented
 *
 * Accumulates the time-weighted nr_running and iowait products for @cpu
 * that sched_get_nr_running_avg() later averages and resets.
 */
void sched_update_nr_prod(int cpu, unsigned long nr_running, bool inc)
{
	s64 diff;
	s64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
	curr_time = sched_clock();
	diff = curr_time - per_cpu(last_time, cpu);
	per_cpu(last_time, cpu) = curr_time;
	per_cpu(nr, cpu) = nr_running + (inc ? 1 : -1);

	/* nr is u64: cast so an underflow past zero is actually caught */
	BUG_ON((s64)per_cpu(nr, cpu) < 0);

	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
EXPORT_SYMBOL(sched_update_nr_prod);
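
/*
 * Illustrative usage sketch (hypothetical caller): the scheduler's
 * enqueue/dequeue paths are expected to call sched_update_nr_prod() with
 * the runqueue's nr_running value as it was before the change, since this
 * function applies the +/-1 itself.  The rq fields below follow the usual
 * kernel scheduler layout and are not defined in this file.
 *
 *	// on enqueue, before bumping the count:
 *	sched_update_nr_prod(cpu_of(rq), rq->nr_running, true);
 *	rq->nr_running++;
 *
 *	// on dequeue:
 *	sched_update_nr_prod(cpu_of(rq), rq->nr_running, false);
 *	rq->nr_running--;
 */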