/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/cpu.h>
#include <mach/mpm.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include "spm.h"
#include "lpm_resources.h"
#include "rpm-notifier.h"
#include <mach/rpm-smd.h>
#include "idle.h"

/* Debug definitions */
enum {
	MSM_LPMRS_DEBUG_RPM = BIT(0),
	MSM_LPMRS_DEBUG_PXO = BIT(1),
	MSM_LPMRS_DEBUG_VDD_DIG = BIT(2),
	MSM_LPMRS_DEBUG_VDD_MEM = BIT(3),
	MSM_LPMRS_DEBUG_L2 = BIT(4),
	MSM_LPMRS_DEBUG_LVLS = BIT(5),
};

static int msm_lpm_debug_mask;
module_param_named(
	debug_mask, msm_lpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

static bool msm_lpm_get_rpm_notif = true;

/* Macros */
#define VDD_DIG_ACTIVE	(950000)
#define VDD_MEM_ACTIVE	(1050000)
#define MAX_RS_NAME	(16)
#define MAX_RS_SIZE	(4)
#define IS_RPM_CTL(rs) \
	(!strncmp(rs->name, "rpm_ctl", MAX_RS_NAME))

static bool msm_lpm_beyond_limits_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_lpm_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_lpm_flush_vdd_dig(int notify_rpm);
static void msm_lpm_notify_vdd_dig(struct msm_rpm_notifier_data
					*rpm_notifier_cb);

static bool msm_lpm_beyond_limits_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_lpm_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_lpm_flush_vdd_mem(int notify_rpm);
static void msm_lpm_notify_vdd_mem(struct msm_rpm_notifier_data
					*rpm_notifier_cb);

static bool msm_lpm_beyond_limits_pxo(struct msm_rpmrs_limits *limits);
static void msm_lpm_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_lpm_flush_pxo(int notify_rpm);
static void msm_lpm_notify_pxo(struct msm_rpm_notifier_data
					*rpm_notifier_cb);

static bool msm_lpm_beyond_limits_l2(struct msm_rpmrs_limits *limits);
static void msm_lpm_flush_l2(int notify_rpm);
static void msm_lpm_aggregate_l2(struct msm_rpmrs_limits *limits);

static void msm_lpm_flush_rpm_ctl(int notify_rpm);

static int msm_lpm_rpm_callback(struct notifier_block *rpm_nb,
				unsigned long action, void *rpm_notif);

static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
				unsigned long action, void *hcpu);

static ssize_t msm_lpm_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static ssize_t msm_lpm_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count);

#define RPMRS_ATTR(_name) \
	__ATTR(_name, S_IRUGO|S_IWUSR, \
		msm_lpm_resource_attr_show, msm_lpm_resource_attr_store)

/* Data structures */
struct msm_lpm_rs_data {
	uint32_t type;
	uint32_t id;
	uint32_t key;
	uint32_t value;
	uint32_t default_value;
	struct msm_rpm_request *handle;
};

struct msm_lpm_resource {
	struct msm_lpm_rs_data rs_data;
	uint32_t sleep_value;
	char name[MAX_RS_NAME];

	uint32_t enable_low_power;
	bool valid;

	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
	void (*aggregate)(struct msm_rpmrs_limits *limits);
	void (*flush)(int notify_rpm);
	void (*notify)(struct msm_rpm_notifier_data *rpm_notifier_cb);
	struct kobj_attribute ko_attr;
};

static struct msm_lpm_resource msm_lpm_l2 = {
	.name = "l2",
	.beyond_limits = msm_lpm_beyond_limits_l2,
	.aggregate = msm_lpm_aggregate_l2,
	.flush = msm_lpm_flush_l2,
	.notify = NULL,
	.valid = true,
	.rs_data = {
		.value = MSM_LPM_L2_CACHE_ACTIVE,
		.default_value = MSM_LPM_L2_CACHE_ACTIVE,
	},
	.ko_attr = RPMRS_ATTR(l2),
};

static struct msm_lpm_resource msm_lpm_vdd_dig = {
	.name = "vdd-dig",
	.beyond_limits = msm_lpm_beyond_limits_vdd_dig,
	.aggregate = msm_lpm_aggregate_vdd_dig,
	.flush = msm_lpm_flush_vdd_dig,
	.notify = msm_lpm_notify_vdd_dig,
	.valid = false,
	.rs_data = {
		.value = VDD_DIG_ACTIVE,
		.default_value = VDD_DIG_ACTIVE,
	},
	.ko_attr = RPMRS_ATTR(vdd_dig),
};

static struct msm_lpm_resource msm_lpm_vdd_mem = {
	.name = "vdd-mem",
	.beyond_limits = msm_lpm_beyond_limits_vdd_mem,
	.aggregate = msm_lpm_aggregate_vdd_mem,
	.flush = msm_lpm_flush_vdd_mem,
	.notify = msm_lpm_notify_vdd_mem,
	.valid = false,
	.rs_data = {
		.value = VDD_MEM_ACTIVE,
		.default_value = VDD_MEM_ACTIVE,
	},
	.ko_attr = RPMRS_ATTR(vdd_mem),
};

static struct msm_lpm_resource msm_lpm_pxo = {
	.name = "pxo",
	.beyond_limits = msm_lpm_beyond_limits_pxo,
	.aggregate = msm_lpm_aggregate_pxo,
	.flush = msm_lpm_flush_pxo,
	.notify = msm_lpm_notify_pxo,
	.valid = false,
	.rs_data = {
		.value = MSM_LPM_PXO_ON,
		.default_value = MSM_LPM_PXO_ON,
	},
	.ko_attr = RPMRS_ATTR(pxo),
};

static struct msm_lpm_resource *msm_lpm_resources[] = {
	&msm_lpm_vdd_dig,
	&msm_lpm_vdd_mem,
	&msm_lpm_pxo,
	&msm_lpm_l2,
};

static struct msm_lpm_resource msm_lpm_rpm_ctl = {
	.name = "rpm_ctl",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.flush = msm_lpm_flush_rpm_ctl,
	.valid = true,
	.ko_attr = RPMRS_ATTR(rpm_ctl),
};

static struct notifier_block msm_lpm_rpm_nblk = {
	.notifier_call = msm_lpm_rpm_callback,
};

static struct notifier_block __refdata msm_lpm_cpu_nblk = {
	.notifier_call = msm_lpm_cpu_callback,
};

static DEFINE_SPINLOCK(msm_lpm_sysfs_lock);

/* Attribute definitions */
static struct attribute *msm_lpm_attributes[] = {
	&msm_lpm_vdd_dig.ko_attr.attr,
	&msm_lpm_vdd_mem.ko_attr.attr,
	&msm_lpm_pxo.ko_attr.attr,
	&msm_lpm_l2.ko_attr.attr,
	NULL,
};

static struct attribute_group msm_lpm_attribute_group = {
	.attrs = msm_lpm_attributes,
};

static struct attribute *msm_lpm_rpm_ctl_attribute[] = {
	&msm_lpm_rpm_ctl.ko_attr.attr,
	NULL,
};

static struct attribute_group msm_lpm_rpm_ctl_attr_group = {
	.attrs = msm_lpm_rpm_ctl_attribute,
};

#define GET_RS_FROM_ATTR(attr) \
	(container_of(attr, struct msm_lpm_resource, ko_attr))

/* RPM */
static struct msm_rpm_request *msm_lpm_create_rpm_request
				(uint32_t rsc_type, uint32_t rsc_id)
{
	struct msm_rpm_request *handle = NULL;

	handle = msm_rpm_create_request(MSM_RPM_CTX_SLEEP_SET,
					rsc_type,
					rsc_id, 1);
	return handle;
}

static int msm_lpm_send_sleep_data(struct msm_rpm_request *handle,
					uint32_t key, uint8_t *value)
{
	int ret = 0;

	if (!handle)
		return ret;

	ret = msm_rpm_add_kvp_data_noirq(handle, key, value, MAX_RS_SIZE);

	if (ret < 0) {
		pr_err("%s: Error adding kvp data key %u, size %d\n",
				__func__, key, MAX_RS_SIZE);
		return ret;
	}

	ret = msm_rpm_send_request_noirq(handle);
	if (ret < 0) {
		pr_err("%s: Error sending RPM request key %u, handle 0x%x\n",
				__func__, key, (unsigned int)handle);
		return ret;
	}
	if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
		pr_info("Rs key %u, value %u, size %d\n", key,
				*(unsigned int *)value, MAX_RS_SIZE);
	return ret;
}

/* RPM notifier */
static int msm_lpm_rpm_callback(struct notifier_block *rpm_nb,
				unsigned long action,
				void *rpm_notif)
{
	int i;
	struct msm_lpm_resource *rs = NULL;
	struct msm_rpm_notifier_data *rpm_notifier_cb =
			(struct msm_rpm_notifier_data *)rpm_notif;

	if (!msm_lpm_get_rpm_notif)
		return NOTIFY_DONE;

	if (!(rpm_nb && rpm_notif))
		return NOTIFY_BAD;

	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
		rs = msm_lpm_resources[i];
		if (rs && rs->valid && rs->notify)
			rs->notify(rpm_notifier_cb);
	}

	return NOTIFY_OK;
}

/* Sysfs */
static ssize_t msm_lpm_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_lpm_sysfs_lock, flags);
	temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_lpm_sysfs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		rc++;
	}

	return rc;
}

static ssize_t msm_lpm_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_lpm_sysfs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	if (IS_RPM_CTL(GET_RS_FROM_ATTR(attr))) {
		struct msm_lpm_resource *rs = GET_RS_FROM_ATTR(attr);
		rs->flush(false);
	}

	spin_unlock_irqrestore(&msm_lpm_sysfs_lock, flags);

	return count;
}

/* lpm resource handling functions */
/* Common */
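/*
 * Helper shared by the resource notify hooks: if the RPM notification
 * matches a resource's type/id/key, cache the notified value in
 * rs->rs_data.value (falling back to the default value when no payload
 * is supplied), so the beyond_limits checks compare against the most
 * recent request.
 */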
static void msm_lpm_notify_common(struct msm_rpm_notifier_data *rpm_notifier_cb,
				struct msm_lpm_resource *rs)
{
	if ((rpm_notifier_cb->rsc_type == rs->rs_data.type) &&
		(rpm_notifier_cb->rsc_id == rs->rs_data.id) &&
		(rpm_notifier_cb->key == rs->rs_data.key)) {
		BUG_ON(rpm_notifier_cb->size > MAX_RS_SIZE);

		if (rs->valid) {
			if (rpm_notifier_cb->value)
				memcpy(&rs->rs_data.value,
					rpm_notifier_cb->value,
					rpm_notifier_cb->size);
			else
				rs->rs_data.value = rs->rs_data.default_value;

			if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
				pr_info("Notification received Rs %s value %u\n",
						rs->name, rs->rs_data.value);
		}
	}
}

/* L2 */
static bool msm_lpm_beyond_limits_l2(struct msm_rpmrs_limits *limits)
{
	uint32_t l2;
	bool ret = true;
	struct msm_lpm_resource *rs = &msm_lpm_l2;

	if (rs->valid) {
		uint32_t l2_buf = rs->rs_data.value;

		if (rs->enable_low_power == 1)
			l2 = MSM_LPM_L2_CACHE_GDHS;
		else if (rs->enable_low_power == 2)
			l2 = MSM_LPM_L2_CACHE_HSFS_OPEN;
		else
			l2 = MSM_LPM_L2_CACHE_ACTIVE;

		if (l2_buf > l2)
			l2 = l2_buf;
		ret = (l2 > limits->l2_cache);

		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_L2)
			pr_info("%s: l2 buf %u, l2 %u, limits %u\n",
				__func__, l2_buf, l2, limits->l2_cache);
	}
	return ret;
}

static void msm_lpm_aggregate_l2(struct msm_rpmrs_limits *limits)
{
	struct msm_lpm_resource *rs = &msm_lpm_l2;

	if (rs->valid)
		rs->sleep_value = limits->l2_cache;
}

static void msm_lpm_flush_l2(int notify_rpm)
{
	struct msm_lpm_resource *rs = &msm_lpm_l2;
	int lpm;
	int rc;

	switch (rs->sleep_value) {
	case MSM_LPM_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		msm_pm_set_l2_flush_flag(1);
		break;
	case MSM_LPM_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_LPM_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:
	case MSM_LPM_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);

	if (rc < 0)
		pr_err("%s: Failed to set L2 low power mode %d\n",
			__func__, lpm);

	if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_L2)
		pr_info("%s: Requesting low power mode %d\n",
				__func__, lpm);
}

/* RPM CTL */
static void msm_lpm_flush_rpm_ctl(int notify_rpm)
{
	struct msm_lpm_resource *rs = &msm_lpm_rpm_ctl;
	msm_lpm_send_sleep_data(rs->rs_data.handle,
				rs->rs_data.key,
				(uint8_t *)&rs->sleep_value);
}

/* VDD Dig */
static bool msm_lpm_beyond_limits_vdd_dig(struct msm_rpmrs_limits *limits)
{
	bool ret = true;
	struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;

	if (rs->valid) {
		uint32_t vdd_buf = rs->rs_data.value;
		uint32_t vdd_dig = rs->enable_low_power ? rs->enable_low_power :
					rs->rs_data.default_value;

		if (vdd_buf > vdd_dig)
			vdd_dig = vdd_buf;

		ret = (vdd_dig > limits->vdd_dig_upper_bound);

		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_VDD_DIG)
			pr_info("%s: buf %d vdd dig %d limits %d\n",
				__func__, vdd_buf, vdd_dig,
				limits->vdd_dig_upper_bound);
	}
	return ret;
}

static void msm_lpm_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
	struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;

	if (rs->valid) {
		uint32_t vdd_buf = rs->rs_data.value;
		if (limits->vdd_dig_lower_bound > vdd_buf)
			rs->sleep_value = limits->vdd_dig_lower_bound;
		else
			rs->sleep_value = vdd_buf;
	}
}

static void msm_lpm_flush_vdd_dig(int notify_rpm)
{
	if (notify_rpm) {
		struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
		msm_lpm_send_sleep_data(rs->rs_data.handle,
					rs->rs_data.key,
					(uint8_t *)&rs->sleep_value);
	}
}

static void msm_lpm_notify_vdd_dig(struct msm_rpm_notifier_data
					*rpm_notifier_cb)
{
	struct msm_lpm_resource *rs = &msm_lpm_vdd_dig;
	msm_lpm_notify_common(rpm_notifier_cb, rs);
}

/* VDD Mem */
static bool msm_lpm_beyond_limits_vdd_mem(struct msm_rpmrs_limits *limits)
{
	bool ret = true;
	struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;

	if (rs->valid) {
		uint32_t vdd_buf = rs->rs_data.value;
		uint32_t vdd_mem = rs->enable_low_power ? rs->enable_low_power :
					rs->rs_data.default_value;

		if (vdd_buf > vdd_mem)
			vdd_mem = vdd_buf;

		ret = (vdd_mem > limits->vdd_mem_upper_bound);

		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_VDD_MEM)
			pr_info("%s: buf %d vdd mem %d limits %d\n",
				__func__, vdd_buf, vdd_mem,
				limits->vdd_mem_upper_bound);
	}
	return ret;
}

static void msm_lpm_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
	struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;

	if (rs->valid) {
		uint32_t vdd_buf = rs->rs_data.value;
		if (limits->vdd_mem_lower_bound > vdd_buf)
			rs->sleep_value = limits->vdd_mem_lower_bound;
		else
			rs->sleep_value = vdd_buf;
	}
}

static void msm_lpm_flush_vdd_mem(int notify_rpm)
{
	if (notify_rpm) {
		struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
		msm_lpm_send_sleep_data(rs->rs_data.handle,
					rs->rs_data.key,
					(uint8_t *)&rs->sleep_value);
	}
}

static void msm_lpm_notify_vdd_mem(struct msm_rpm_notifier_data
					*rpm_notifier_cb)
{
	struct msm_lpm_resource *rs = &msm_lpm_vdd_mem;
	msm_lpm_notify_common(rpm_notifier_cb, rs);
}

/* PXO */
static bool msm_lpm_beyond_limits_pxo(struct msm_rpmrs_limits *limits)
{
	bool ret = true;
	struct msm_lpm_resource *rs = &msm_lpm_pxo;

	if (rs->valid) {
		uint32_t pxo_buf = rs->rs_data.value;
		uint32_t pxo = rs->enable_low_power ? MSM_LPM_PXO_OFF :
					rs->rs_data.default_value;

		if (pxo_buf > pxo)
			pxo = pxo_buf;

		ret = (pxo > limits->pxo);

		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_PXO)
			pr_info("%s: pxo buf %d pxo %d limits pxo %d\n",
				__func__, pxo_buf, pxo, limits->pxo);
	}
	return ret;
}

static void msm_lpm_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
	struct msm_lpm_resource *rs = &msm_lpm_pxo;

	if (rs->valid) {
		uint32_t pxo_buf = rs->rs_data.value;
		if (limits->pxo > pxo_buf)
			rs->sleep_value = limits->pxo;
		else
			rs->sleep_value = pxo_buf;

		if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_PXO)
			pr_info("%s: pxo buf %d sleep value %d\n",
				__func__, pxo_buf, rs->sleep_value);
	}
}

static void msm_lpm_flush_pxo(int notify_rpm)
{
	if (notify_rpm) {
		struct msm_lpm_resource *rs = &msm_lpm_pxo;
		msm_lpm_send_sleep_data(rs->rs_data.handle,
					rs->rs_data.key,
					(uint8_t *)&rs->sleep_value);
	}
}

static void msm_lpm_notify_pxo(struct msm_rpm_notifier_data
					*rpm_notifier_cb)
{
	struct msm_lpm_resource *rs = &msm_lpm_pxo;
	msm_lpm_notify_common(rpm_notifier_cb, rs);
}

/* MPM
static bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
{
	return ((limits->pxo == MSM_LPM_PXO_OFF) ||
		(limits->vdd_dig_lower_bound <= VDD_DIG_RET_HIGH));
}*/

/* LPM levels interface */
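/*
 * Return true if any low power resource's current vote exceeds the
 * limits of the given low power level, i.e. the level must not be
 * entered.
 */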
bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
{
	int i;
	struct msm_lpm_resource *rs;
	bool beyond_limit = false;

	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
		rs = msm_lpm_resources[i];
		if (rs->beyond_limits && rs->beyond_limits(limits)) {
			beyond_limit = true;
			if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_LVLS)
				pr_info("%s: %s beyond limit\n", __func__,
						rs->name);
			break;
		}
	}

	return beyond_limit;
}

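/*
 * Aggregate each resource's sleep vote against the chosen level's
 * limits, then flush the votes; resources that talk to the RPM only
 * send their sleep-set request when notify_rpm is set. RPM
 * notifications are ignored while the flush is in progress.
 */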
int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
				bool from_idle, bool notify_rpm)
{
	int ret = 0;
	int i;
	struct msm_lpm_resource *rs = NULL;

	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
		rs = msm_lpm_resources[i];
		if (rs->aggregate)
			rs->aggregate(limits);
	}

	msm_lpm_get_rpm_notif = false;
	for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
		rs = msm_lpm_resources[i];
		if (rs->flush)
			rs->flush(notify_rpm);
	}
	msm_lpm_get_rpm_notif = true;

	/* MPM Enter sleep
	if (msm_lpm_use_mpm(limits))
		msm_mpm_enter_sleep(from_idle);*/

	return ret;
}

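/*
 * Counterpart of msm_lpmrs_enter_sleep(). The MPM wakeup handling is
 * commented out above, so this is currently a no-op.
 */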
void msm_lpmrs_exit_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
				bool from_idle, bool notify_rpm)
{
	/* MPM exit sleep
	if (msm_lpm_use_mpm(limits))
		msm_mpm_exit_sleep(from_idle);*/
}

static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
				unsigned long action, void *hcpu)
{
	struct msm_lpm_resource *rs = &msm_lpm_l2;
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_ONLINE:
		if (num_online_cpus() > 1)
			rs->rs_data.value = MSM_LPM_L2_CACHE_ACTIVE;
		break;
	case CPU_DEAD_FROZEN:
	case CPU_DEAD:
		if (num_online_cpus() == 1)
			rs->rs_data.value = MSM_LPM_L2_CACHE_GDHS;
		break;
	}
	return NOTIFY_OK;
}

/* RPM CTL */
static int __devinit msm_lpm_init_rpm_ctl(void)
{
	struct msm_lpm_resource *rs = &msm_lpm_rpm_ctl;

	rs->rs_data.handle = msm_rpm_create_request(
				MSM_RPM_CTX_ACTIVE_SET,
				rs->rs_data.type,
				rs->rs_data.id, 1);
	if (!rs->rs_data.handle)
		return -EIO;

	rs->valid = true;
	return 0;
}

static int __devinit msm_lpm_resource_sysfs_add(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *low_power_kobj = NULL;
	struct kobject *mode_kobj = NULL;
	int rc = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kobj = kobject_create_and_add(
				"enable_low_power", module_kobj);
	if (!low_power_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	mode_kobj = kobject_create_and_add(
				"mode", module_kobj);
	if (!mode_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(low_power_kobj, &msm_lpm_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n",
			__func__);
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(mode_kobj, &msm_lpm_rpm_ctl_attr_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n",
			__func__);
		goto resource_sysfs_add_exit;
	}

resource_sysfs_add_exit:
	if (rc) {
		if (low_power_kobj)
			sysfs_remove_group(low_power_kobj,
				&msm_lpm_attribute_group);
		kobject_del(low_power_kobj);
		kobject_del(mode_kobj);
	}

	return rc;
}

late_initcall(msm_lpm_resource_sysfs_add);

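/*
 * Probe: walk the children of the qcom,lpm-resources device tree node,
 * match each child's "qcom,name" against the static resource table,
 * read the RPM resource type/id/key and create a sleep-set RPM request
 * handle for it.
 */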
static int __devinit msm_lpmrs_probe(struct platform_device *pdev)
{
	struct device_node *node = NULL;
	char *key = NULL;
	int ret = 0;

	for_each_child_of_node(pdev->dev.of_node, node) {
		struct msm_lpm_resource *rs = NULL;
		const char *val;
		int i;

		key = "qcom,name";
		ret = of_property_read_string(node, key, &val);
		if (ret) {
			pr_err("%s: Cannot read %s\n", __func__, key);
			goto fail;
		}

		for (i = 0; i < ARRAY_SIZE(msm_lpm_resources); i++) {
			char *lpmrs_name = msm_lpm_resources[i]->name;
			if (!msm_lpm_resources[i]->valid &&
				!strncmp(val, lpmrs_name, strnlen(lpmrs_name,
							MAX_RS_NAME))) {
				rs = msm_lpm_resources[i];
				break;
			}
		}

		if (!rs) {
			pr_err("%s: LPM resource %s not found\n",
				__func__, val);
			continue;
		}

		key = "qcom,type";
		ret = of_property_read_u32(node, key, &rs->rs_data.type);
		if (ret) {
			pr_err("%s: Failed to read %s\n", __func__, key);
			goto fail;
		}

		key = "qcom,id";
		ret = of_property_read_u32(node, key, &rs->rs_data.id);
		if (ret) {
			pr_err("%s: Failed to read %s\n", __func__, key);
			goto fail;
		}

		key = "qcom,key";
		ret = of_property_read_u32(node, key, &rs->rs_data.key);
		if (ret) {
			pr_err("%s: Failed to read %s\n", __func__, key);
			goto fail;
		}

		rs->rs_data.handle = msm_lpm_create_rpm_request(
					rs->rs_data.type, rs->rs_data.id);

		if (!rs->rs_data.handle) {
			pr_err("%s: Failed to allocate handle for %s\n",
				__func__, rs->name);
			ret = -1;
			goto fail;
		}

		rs->valid = true;
	}
	msm_rpm_register_notifier(&msm_lpm_rpm_nblk);
	msm_lpm_init_rpm_ctl();
	register_hotcpu_notifier(&msm_lpm_cpu_nblk);
	/* For UP mode, set the default to HSFS OPEN */
	if (num_possible_cpus() == 1) {
		msm_lpm_l2.rs_data.default_value = MSM_LPM_L2_CACHE_HSFS_OPEN;
		msm_lpm_l2.rs_data.value = MSM_LPM_L2_CACHE_HSFS_OPEN;
	}
	return 0;
fail:
	return ret;
}

static struct of_device_id msm_lpmrs_match_table[] = {
	{.compatible = "qcom,lpm-resources"},
	{},
};

static struct platform_driver msm_lpmrs_driver = {
	.probe = msm_lpmrs_probe,
	.driver = {
		.name = "lpm-resources",
		.owner = THIS_MODULE,
		.of_match_table = msm_lpmrs_match_table,
	},
};

int __init msm_lpmrs_module_init(void)
{
	return platform_driver_register(&msm_lpmrs_driver);
}