/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "mpd %s: " fmt, __func__

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq.h>
#include <linux/rq_stats.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_dcvs_scm.h>

#define DEFAULT_RQ_AVG_POLL_MS (1)

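/*
 * Kobject attribute handles for every tunable this driver exposes in
 * sysfs: the driver controls (enabled, poll interval, iowait threshold),
 * the algorithm parameters forwarded to the secure world, and the
 * hotplug latency counters.
 */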
struct mpd_attrib {
        struct kobj_attribute enabled;
        struct kobj_attribute rq_avg_poll_ms;
        struct kobj_attribute iowait_threshold_pct;

        struct kobj_attribute em_win_size_min_us;
        struct kobj_attribute em_win_size_max_us;
        struct kobj_attribute em_max_util_pct;
        struct kobj_attribute mp_em_rounding_point_min;
        struct kobj_attribute mp_em_rounding_point_max;
        struct kobj_attribute online_util_pct_min;
        struct kobj_attribute online_util_pct_max;
        struct kobj_attribute slack_time_min_us;
        struct kobj_attribute slack_time_max_us;
        struct kobj_attribute hp_up_max_ms;
        struct kobj_attribute hp_up_ms;
        struct kobj_attribute hp_up_count;
        struct kobj_attribute hp_dw_max_ms;
        struct kobj_attribute hp_dw_ms;
        struct kobj_attribute hp_dw_count;
        struct attribute_group attrib_group;
};

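/*
 * Event and run-queue average pending delivery to the secure world;
 * written under rq_avg_lock and consumed by the msm_mpd_do_update_scm
 * thread.
 */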
struct msm_mpd_scm_data {
        enum msm_dcvs_scm_event event;
        int nr;
};

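/* Global driver state: tunables, worker threads, timers and wait queues. */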
struct mpdecision {
        uint32_t enabled;
        atomic_t algo_cpu_mask;
        uint32_t rq_avg_poll_ms;
        uint32_t iowait_threshold_pct;
        ktime_t next_update;
        uint32_t slack_us;
        struct msm_mpd_algo_param mp_param;
        struct mpd_attrib attrib;
        struct mutex lock;
        struct task_struct *task;
        struct task_struct *hptask;
        struct hrtimer slack_timer;
        struct msm_mpd_scm_data data;
        int hpupdate;
        wait_queue_head_t wait_q;
        wait_queue_head_t wait_hpq;
};

struct hp_latency {
        int hp_up_max_ms;
        int hp_up_ms;
        int hp_up_count;
        int hp_dw_max_ms;
        int hp_dw_ms;
        int hp_dw_count;
};

static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
static DEFINE_SPINLOCK(rq_avg_lock);

enum {
        MSM_MPD_DEBUG_NOTIFIER = BIT(0),
        MSM_MPD_CORE_STATUS = BIT(1),
        MSM_MPD_SLACK_TIMER = BIT(2),
};

enum {
        HPUPDATE_WAITING = 0,     /* we are waiting for a cpumask update */
        HPUPDATE_SCHEDULED = 1,   /* a cpumask update has been scheduled */
        HPUPDATE_IN_PROGRESS = 2, /* we are in the process of hotplugging */
};

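/*
 * Enabled by default; can be disabled at boot via the module parameter
 * (typically msm_mpdecision.enabled=0 on the kernel command line, or the
 * corresponding entry under /sys/module/.../parameters/). This is only
 * checked once, at init.
 */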
static int msm_mpd_enabled = 1;
module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);

static struct dentry *debugfs_base;
static struct mpdecision msm_mpd;

static struct hp_latency hp_latencies;

static unsigned long last_nr;
static int num_present_hundreds;

#define RQ_AVG_INSIGNIFICANT_BITS 3
static bool ok_to_update_tz(int nr, int last_nr)
{
        /*
         * Filter out unnecessary TZ reports when the run queue average has
         * not changed much since the last reported value. The right shift
         * by RQ_AVG_INSIGNIFICANT_BITS drops small changes in the average
         * that will not cause the online cpu mask to change. Also report
         * when the online cpu count does not match the count requested by
         * TZ, unless we are already in the process of bringing cpus online,
         * as indicated by HPUPDATE_IN_PROGRESS in msm_mpd.hpupdate.
         */
        return
        (((nr >> RQ_AVG_INSIGNIFICANT_BITS)
          != (last_nr >> RQ_AVG_INSIGNIFICANT_BITS))
         || ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
              != num_online_cpus())
             && (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS)));
}

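/*
 * Per-cpu poll timer: samples the scheduler's run-queue average every
 * rq_avg_poll_ms and, when the change is significant, cancels any pending
 * slack timer and wakes the SCM update thread with a RUNQ_UPDATE event.
 * All cpus share a single next_update deadline, so only the first timer
 * to fire in a given window does the work.
 */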
static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
{
        int nr, nr_iowait;
        ktime_t curr_time = ktime_get();
        unsigned long flags;
        int cpu = smp_processor_id();
        enum hrtimer_restart restart = HRTIMER_RESTART;

        spin_lock_irqsave(&rq_avg_lock, flags);
        /* If running on the wrong cpu, don't restart */
        if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
                restart = HRTIMER_NORESTART;

        if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
                goto out;

        msm_mpd.next_update = ktime_add_ns(curr_time,
                        (msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));

        sched_get_nr_running_avg(&nr, &nr_iowait);

        if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
                nr = last_nr;

        if (nr > num_present_hundreds)
                nr = num_present_hundreds;

        if (ok_to_update_tz(nr, last_nr)) {
                hrtimer_try_to_cancel(&msm_mpd.slack_timer);
                msm_mpd.data.nr = nr;
                msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
                wake_up(&msm_mpd.wait_q);
                last_nr = nr;
        }

out:
        /* set next expiration */
        hrtimer_set_expires(timer, msm_mpd.next_update);
        spin_unlock_irqrestore(&rq_avg_lock, flags);
        return restart;
}

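/*
 * Online a core, record how long cpu_up() took, and notify the secure
 * world that the core is online together with its current frequency and
 * the observed hotplug latency.
 */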
static void bring_up_cpu(int cpu)
{
        int cpu_action_time_ms;
        int time_taken_ms;
        int ret, ret1, ret2;

        cpu_action_time_ms = ktime_to_ms(ktime_get());
        ret = cpu_up(cpu);
        if (ret) {
                pr_debug("Error %d onlining core %d\n", ret, cpu);
        } else {
                time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
                if (time_taken_ms > hp_latencies.hp_up_max_ms)
                        hp_latencies.hp_up_max_ms = time_taken_ms;
                if (time_taken_ms > 5)
                        pr_warn("cpu_up for cpu%d exceeded 5ms (%d)\n",
                                cpu, time_taken_ms);
                hp_latencies.hp_up_ms += time_taken_ms;
                hp_latencies.hp_up_count++;
                ret = msm_dcvs_scm_event(
                                CPU_OFFSET + cpu,
                                MSM_DCVS_SCM_CORE_ONLINE,
                                cpufreq_get(cpu),
                                (uint32_t) time_taken_ms * USEC_PER_MSEC,
                                &ret1, &ret2);
                if (ret)
                        pr_err("Error sending hotplug scm event err=%d\n", ret);
        }
}

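/* Mirror of bring_up_cpu() for offlining; cpu0 is never taken down. */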
static void bring_down_cpu(int cpu)
{
        int cpu_action_time_ms;
        int time_taken_ms;
        int ret, ret1, ret2;

        BUG_ON(cpu == 0);
        cpu_action_time_ms = ktime_to_ms(ktime_get());
        ret = cpu_down(cpu);
        if (ret) {
                pr_debug("Error %d offlining core %d\n", ret, cpu);
        } else {
                time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
                if (time_taken_ms > hp_latencies.hp_dw_max_ms)
                        hp_latencies.hp_dw_max_ms = time_taken_ms;
                if (time_taken_ms > 5)
                        pr_warn("cpu_down for cpu%d exceeded 5ms (%d)\n",
                                cpu, time_taken_ms);
                hp_latencies.hp_dw_ms += time_taken_ms;
                hp_latencies.hp_dw_count++;
                ret = msm_dcvs_scm_event(
                                CPU_OFFSET + cpu,
                                MSM_DCVS_SCM_CORE_OFFLINE,
                                (uint32_t) time_taken_ms * USEC_PER_MSEC,
                                0,
                                &ret1, &ret2);
                if (ret)
                        pr_err("Error sending hotplug scm event err=%d\n", ret);
        }
}

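/*
 * Forward an event to the secure-world mp-decision algorithm. TZ replies
 * with the cpu mask it wants online and a slack interval; publish the
 * mask, kick the hotplug thread, and arm the slack timer so TZ is polled
 * again even if the run queue stays quiet.
 */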
static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
{
        int ret = 0;
        uint32_t req_cpu_mask = 0;
        uint32_t slack_us = 0;
        uint32_t param0 = 0;

        if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
                param0 = nr;

        ret = msm_dcvs_scm_event(0, event, param0, 0,
                                &req_cpu_mask, &slack_us);

        if (ret) {
                pr_err("Error (%d) sending event %d, param %d\n", ret, event,
                                param0);
                return ret;
        }

        msm_mpd.slack_us = slack_us;
        atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
        msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
        wake_up(&msm_mpd.wait_hpq);

        /* Start MP Decision slack timer */
        if (slack_us) {
                hrtimer_cancel(&msm_mpd.slack_timer);
                ret = hrtimer_start(&msm_mpd.slack_timer,
                                ktime_set(0, slack_us * NSEC_PER_USEC),
                                HRTIMER_MODE_REL_PINNED);
                if (ret)
                        pr_err("Failed to register slack timer (%d) %d\n",
                                        slack_us, ret);
        }

        return ret;
}

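/*
 * The slack interval elapsed without a run-queue update; wake the SCM
 * thread with a QOS_TIMER_EXPIRED event unless a RUNQ_UPDATE is already
 * pending.
 */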
static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
{
        unsigned long flags;

        spin_lock_irqsave(&rq_avg_lock, flags);
        if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
                goto out;

        msm_mpd.data.nr = 0;
        msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
        wake_up(&msm_mpd.wait_q);
out:
        spin_unlock_irqrestore(&rq_avg_lock, flags);
        return HRTIMER_NORESTART;
}

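/*
 * The pinned per-cpu poll timer cannot fire while its cpu is in a low
 * power state, so stop it on idle entry and restart it, aligned to the
 * shared deadline, on exit.
 */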
static int msm_mpd_idle_notifier(struct notifier_block *self,
                                unsigned long cmd, void *v)
{
        int cpu = smp_processor_id();
        unsigned long flags;

        switch (cmd) {
        case CPU_PM_EXIT:
                spin_lock_irqsave(&rq_avg_lock, flags);
                hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
                                msm_mpd.next_update,
                                HRTIMER_MODE_ABS_PINNED);
                spin_unlock_irqrestore(&rq_avg_lock, flags);
                break;
        case CPU_PM_ENTER:
                hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

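/* Restart the poll timer on a cpu as soon as it starts coming online. */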
static int msm_mpd_hotplug_notifier(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        unsigned long flags;

        switch (action & (~CPU_TASKS_FROZEN)) {
        case CPU_STARTING:
                spin_lock_irqsave(&rq_avg_lock, flags);
                hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
                                msm_mpd.next_update,
                                HRTIMER_MODE_ABS_PINNED);
                spin_unlock_irqrestore(&rq_avg_lock, flags);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block msm_mpd_idle_nb = {
        .notifier_call = msm_mpd_idle_notifier,
};

static struct notifier_block msm_mpd_hotplug_nb = {
        .notifier_call = msm_mpd_hotplug_notifier,
};

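/*
 * RT kthread that reconciles the online cpu set with the mask requested
 * by TZ. After every successful hotplug operation the scan restarts from
 * the beginning, since the requested mask may have changed underneath us.
 */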
static int __cpuinit msm_mpd_do_hotplug(void *data)
{
        int *event = (int *)data;
        static struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
        int cpu;

        sched_setscheduler(current, SCHED_FIFO, &param);

        while (1) {
                wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
                if (kthread_should_stop())
                        break;

                msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
                /*
                 * Bring online any offline cores, then offline any online
                 * cores. Whenever a core is off/onlined restart the
                 * procedure in case a new core is desired to be brought
                 * online in the meantime.
                 */
restart:
                for_each_possible_cpu(cpu) {
                        if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
                                && !cpu_online(cpu)) {
                                bring_up_cpu(cpu);
                                if (cpu_online(cpu))
                                        goto restart;
                        }
                }

                for_each_possible_cpu(cpu) {
                        if (!(atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
                                && cpu_online(cpu)) {
                                bring_down_cpu(cpu);
                                if (!cpu_online(cpu))
                                        goto restart;
                        }
                }
                msm_mpd.hpupdate = HPUPDATE_WAITING;
        }

        return 0;
}

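/*
 * RT kthread that drains pending events to TZ. The event/nr pair is
 * snapshotted and cleared under rq_avg_lock so a timer firing during the
 * SCM call cannot be lost.
 */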
static int msm_mpd_do_update_scm(void *data)
{
        struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
        static struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
        unsigned long flags;
        enum msm_dcvs_scm_event event;
        int nr;

        sched_setscheduler(current, SCHED_FIFO, &param);

        while (1) {
                wait_event(msm_mpd.wait_q,
                        msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
                        || msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
                        || kthread_should_stop());

                if (kthread_should_stop())
                        break;

                spin_lock_irqsave(&rq_avg_lock, flags);
                event = scm_data->event;
                nr = scm_data->nr;
                scm_data->event = 0;
                scm_data->nr = 0;
                spin_unlock_irqrestore(&rq_avg_lock, flags);

                msm_mpd_update_scm(event, nr);
        }
        return 0;
}

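/*
 * Enable/disable sequence: push the algorithm parameters, tell TZ, then
 * start (or stop) the SCM and hotplug threads and the per-cpu poll
 * timers, and register (or unregister) the pm/hotplug notifiers.
 */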
static int __ref msm_mpd_set_enabled(uint32_t enable)
{
        int ret = 0;
        int ret0 = 0;
        int ret1 = 0;
        int cpu;
        static uint32_t last_enable;

        enable = (enable > 0) ? 1 : 0;
        if (last_enable == enable)
                return ret;

        if (enable) {
                ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
                if (ret) {
                        pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
                                ret);
                        return ret;
                }
        }

        ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
                                &ret0, &ret1);
        if (ret) {
                pr_err("Error(%d) %s MP Decision\n",
                                ret, (enable ? "enabling" : "disabling"));
        } else {
                last_enable = enable;
                last_nr = 0;
        }
        if (enable) {
                msm_mpd.next_update = ktime_add_ns(ktime_get(),
                                (msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
                msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
                                &msm_mpd.data, "msm_mpdecision");
                if (IS_ERR(msm_mpd.task))
                        return PTR_ERR(msm_mpd.task);

                msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
                                &msm_mpd.hpupdate, "msm_hp");
                if (IS_ERR(msm_mpd.hptask)) {
                        kthread_stop(msm_mpd.task);
                        return PTR_ERR(msm_mpd.hptask);
                }

                for_each_online_cpu(cpu)
                        hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
                                        msm_mpd.next_update,
                                        HRTIMER_MODE_ABS_PINNED);
                cpu_pm_register_notifier(&msm_mpd_idle_nb);
                register_cpu_notifier(&msm_mpd_hotplug_nb);
                msm_mpd.enabled = 1;
        } else {
                for_each_online_cpu(cpu)
                        hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
                kthread_stop(msm_mpd.hptask);
                kthread_stop(msm_mpd.task);
                cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
                unregister_cpu_notifier(&msm_mpd_hotplug_nb);
                msm_mpd.enabled = 0;
        }

        return ret;
}

static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
{
        /*
         * No need to do anything. Just let the timer set its own next poll
         * interval when it next fires.
         */
        msm_mpd.rq_avg_poll_ms = val;
        return 0;
}

static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
{
        /*
         * No need to do anything. Just let the timer use the new threshold
         * when it next fires.
         */
        msm_mpd.iowait_threshold_pct = val;
        return 0;
}

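/*
 * MPD_ALGO_PARAM generates sysfs show/store handlers for a field of
 * msm_mpd.mp_param: store parses the value, writes it, and pushes the
 * whole parameter block to TZ, rolling back on failure. The attributes
 * live under the module kobject (typically /sys/module/msm_mpdecision/),
 * created in msm_mpd_probe() below.
 */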
#define MPD_ALGO_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
                        struct kobj_attribute *attr, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
                struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
        int ret = 0; \
        uint32_t val; \
        uint32_t old_val; \
        mutex_lock(&msm_mpd.lock); \
        ret = kstrtouint(buf, 10, &val); \
        if (ret) { \
                pr_err("Invalid input %s for %s %d\n", \
                                buf, __stringify(_name), ret); \
                mutex_unlock(&msm_mpd.lock); \
                return ret; \
        } \
        old_val = _param; \
        _param = val; \
        ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
        if (ret) { \
                pr_err("Error %d returned when setting algo param %s to %d\n",\
                                ret, __stringify(_name), val); \
                _param = old_val; \
        } \
        mutex_unlock(&msm_mpd.lock); \
        return count; \
}

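/*
 * MPD_PARAM is the same pattern for driver-level tunables; store calls
 * the matching msm_mpd_set_<name>() helper instead of pushing the algo
 * parameter block.
 */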
#define MPD_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
                        struct kobj_attribute *attr, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
                struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
        int ret = 0; \
        uint32_t val; \
        uint32_t old_val; \
        mutex_lock(&msm_mpd.lock); \
        ret = kstrtouint(buf, 10, &val); \
        if (ret) { \
                pr_err("Invalid input %s for %s %d\n", \
                                buf, __stringify(_name), ret); \
                mutex_unlock(&msm_mpd.lock); \
                return ret; \
        } \
        old_val = _param; \
        ret = msm_mpd_set_##_name(val); \
        if (ret) { \
                pr_err("Error %d returned when setting param %s to %d\n", \
                                ret, __stringify(_name), val); \
                _param = old_val; \
        } \
        mutex_unlock(&msm_mpd.lock); \
        return count; \
}

#define MPD_RW_ATTRIB(i, _name) \
        msm_mpd.attrib._name.attr.name = __stringify(_name); \
        msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
        msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
        msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
        msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;

MPD_PARAM(enabled, msm_mpd.enabled);
MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
MPD_ALGO_PARAM(mp_em_rounding_point_min,
                msm_mpd.mp_param.mp_em_rounding_point_min);
MPD_ALGO_PARAM(mp_em_rounding_point_max,
                msm_mpd.mp_param.mp_em_rounding_point_max);
MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);

static int __devinit msm_mpd_probe(struct platform_device *pdev)
{
        struct kobject *module_kobj = NULL;
        int ret = 0;
        const int attr_count = 19;
        struct msm_mpd_algo_param *param = NULL;

        param = pdev->dev.platform_data;
        /* The algo parameters are mandatory; they are copied below. */
        if (!param)
                return -EINVAL;

        module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
        if (!module_kobj) {
                pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
                ret = -ENOENT;
                goto done;
        }

        msm_mpd.attrib.attrib_group.attrs =
                kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
        if (!msm_mpd.attrib.attrib_group.attrs) {
                ret = -ENOMEM;
                goto done;
        }

        MPD_RW_ATTRIB(0, enabled);
        MPD_RW_ATTRIB(1, rq_avg_poll_ms);
        MPD_RW_ATTRIB(2, iowait_threshold_pct);
        MPD_RW_ATTRIB(3, em_win_size_min_us);
        MPD_RW_ATTRIB(4, em_win_size_max_us);
        MPD_RW_ATTRIB(5, em_max_util_pct);
        MPD_RW_ATTRIB(6, mp_em_rounding_point_min);
        MPD_RW_ATTRIB(7, mp_em_rounding_point_max);
        MPD_RW_ATTRIB(8, online_util_pct_min);
        MPD_RW_ATTRIB(9, online_util_pct_max);
        MPD_RW_ATTRIB(10, slack_time_min_us);
        MPD_RW_ATTRIB(11, slack_time_max_us);
        MPD_RW_ATTRIB(12, hp_up_max_ms);
        MPD_RW_ATTRIB(13, hp_up_ms);
        MPD_RW_ATTRIB(14, hp_up_count);
        MPD_RW_ATTRIB(15, hp_dw_max_ms);
        MPD_RW_ATTRIB(16, hp_dw_ms);
        MPD_RW_ATTRIB(17, hp_dw_count);

        msm_mpd.attrib.attrib_group.attrs[18] = NULL;
        ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
        if (ret)
                pr_err("Unable to create sysfs objects: %d\n", ret);

        msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;

        memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));

        debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
        if (!debugfs_base) {
                pr_err("Cannot create debugfs base msm_mpdecision\n");
                ret = -ENOENT;
                goto done;
        }

done:
        if (ret && debugfs_base)
                debugfs_remove(debugfs_base);

        return ret;
}

static int __devexit msm_mpd_remove(struct platform_device *pdev)
{
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver msm_mpd_driver = {
        .probe = msm_mpd_probe,
        .remove = __devexit_p(msm_mpd_remove),
        .driver = {
                .name = "msm_mpdecision",
                .owner = THIS_MODULE,
        },
};

static int __init msm_mpdecision_init(void)
{
        int cpu;

        if (!msm_mpd_enabled) {
                pr_info("Not enabled\n");
                return 0;
        }

        num_present_hundreds = 100 * num_present_cpus();

        hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
                        HRTIMER_MODE_REL_PINNED);
        msm_mpd.slack_timer.function = msm_mpd_slack_timer;

        for_each_possible_cpu(cpu) {
                hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
                                CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
                per_cpu(rq_avg_poll_timer, cpu).function
                        = msm_mpd_rq_avg_poll_timer;
        }
        mutex_init(&msm_mpd.lock);
        init_waitqueue_head(&msm_mpd.wait_q);
        init_waitqueue_head(&msm_mpd.wait_hpq);
        return platform_driver_register(&msm_mpd_driver);
}
late_initcall(msm_mpdecision_init);