/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include "mpm.h"
#include "rpm_resources.h"
#include "spm.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
	MSM_RPMRS_DEBUG_BUFFER = BIT(1),
};

static int msm_rpmrs_debug_mask;
module_param_named(
	debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
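
/*
 * debug_mask is writable at runtime, e.g. (path assuming this file is
 * built in as rpm_resources):
 *
 *   echo 3 > /sys/module/rpm_resources/parameters/debug_mask
 */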

static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count);

#ifdef CONFIG_MSM_L2_SPM
static void *msm_rpmrs_l2_counter_addr;
static int msm_rpmrs_l2_reset_count;
#define L2_PC_COUNTER_ADDR 0x660
#endif

#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2

#define RPMRS_ATTR(_name) \
	__ATTR(_name, S_IRUGO|S_IWUSR, \
		msm_rpmrs_resource_attr_show, msm_rpmrs_resource_attr_store)

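/*
 * A low-power resource spans up to MSM_RPMRS_MAX_RS_REGISTER_COUNT RPM
 * registers (vdd_mem and vdd_dig each use two) plus the hooks used to
 * check a sleep level against it, fold it into the sleep set, and
 * restore it afterwards.
 */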
struct msm_rpmrs_resource {
	struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
	uint32_t size;
	char *name;

	uint32_t enable_low_power;

	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
	void (*aggregate)(struct msm_rpmrs_limits *limits);
	void (*restore)(void);

	struct kobj_attribute ko_attr;
};

static struct msm_rpmrs_resource msm_rpmrs_pxo = {
	.rs[0].id = MSM_RPMRS_ID_PXO_CLK,
	.size = 1,
	.name = "pxo",
	.beyond_limits = msm_rpmrs_pxo_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_pxo,
	.restore = msm_rpmrs_restore_pxo,
	.ko_attr = RPMRS_ATTR(pxo),
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
	.rs[0].id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL,
	.size = 1,
	.name = "L2_cache",
	.beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_l2_cache,
	.restore = msm_rpmrs_restore_l2_cache,
	.ko_attr = RPMRS_ATTR(L2_cache),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
	.rs[0].id = MSM_RPMRS_ID_VDD_MEM_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_MEM_1,
	.size = 2,
	.name = "vdd_mem",
	.beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_mem,
	.restore = msm_rpmrs_restore_vdd_mem,
	.ko_attr = RPMRS_ATTR(vdd_mem),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
	.rs[0].id = MSM_RPMRS_ID_VDD_DIG_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_DIG_1,
	.size = 2,
	.name = "vdd_dig",
	.beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_dig,
	.restore = msm_rpmrs_restore_vdd_dig,
	.ko_attr = RPMRS_ATTR(vdd_dig),
};

static struct msm_rpmrs_resource msm_rpmrs_rpm_cpu = {
	.rs[0].id = MSM_RPMRS_ID_RPM_CTL,
	.size = 1,
	.name = "rpm_cpu",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.restore = NULL,
	.ko_attr = RPMRS_ATTR(rpm_cpu),
};

static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
	&msm_rpmrs_pxo,
	&msm_rpmrs_l2_cache,
	&msm_rpmrs_vdd_mem,
	&msm_rpmrs_vdd_dig,
	&msm_rpmrs_rpm_cpu,
};

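/*
 * msm_rpmrs_buffer caches the last sleep-set value requested for each
 * RPM ID; msm_rpmrs_buffered marks which entries are valid, and
 * msm_rpmrs_listed marks the IDs that belong to the resources above.
 */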
static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST + 1];
static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);
static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);
static DEFINE_SPINLOCK(msm_rpmrs_lock);

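/*
 * The voltage level lives in the low 12 bits of a VDD request; bits
 * above the mask are left untouched when the sleep set is aggregated.
 */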
#define MSM_RPMRS_VDD_MASK 0xfff
#define MSM_RPMRS_VDD(v) ((v) & (MSM_RPMRS_VDD_MASK))

/******************************************************************************
 * Attribute Definitions
 *****************************************************************************/

static struct attribute *msm_rpmrs_attributes[] = {
	&msm_rpmrs_pxo.ko_attr.attr,
	&msm_rpmrs_l2_cache.ko_attr.attr,
	&msm_rpmrs_vdd_mem.ko_attr.attr,
	&msm_rpmrs_vdd_dig.ko_attr.attr,
	&msm_rpmrs_rpm_cpu.ko_attr.attr,
	NULL,
};

static struct attribute_group msm_rpmrs_attribute_group = {
	.attrs = msm_rpmrs_attributes,
};

#define GET_RS_FROM_ATTR(attr) \
	(container_of(attr, struct msm_rpmrs_resource, ko_attr))


/******************************************************************************
 * Resource Specific Functions
 *****************************************************************************/

static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
{
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
}

static void msm_rpmrs_restore_sclk(void)
{
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
}

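/*
 * Most resources supply the same trio of helpers: beyond_limits()
 * reports whether the buffered request exceeds what a sleep level
 * permits, aggregate() raises the buffered value to the level's floor
 * while saving the original, and restore() puts the saved value back
 * once the buffer has been flushed.
 */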
static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t pxo;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		pxo = msm_rpmrs_buffer[rs->rs[0].id];
	else
		pxo = MSM_RPMRS_PXO_ON;

	return pxo > limits->pxo;
}

static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->pxo > *buf)
			*buf = limits->pxo;
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static void msm_rpmrs_restore_pxo(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
	else
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->l2_cache > *buf)
			*buf = limits->l2_cache;

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

#ifdef CONFIG_MSM_L2_SPM
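/*
 * With an L2 SPM present, the L2 mode is driven locally through
 * msm_rpmrs_flush_L2() rather than through the RPM sleep set, so this
 * check reads the cached value instead of the request buffer (see
 * msm_rpmrs_l2_counter_init(), which swaps this helper in).
 */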
static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache = rs->rs[0].value;

	if (!rs->enable_low_power)
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}
#endif

static void msm_rpmrs_restore_l2_cache(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_HIGH;
		else
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
			vdd_mem = buffered_value;
	} else {
		vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_mem) >=
		MSM_RPMRS_VDD(limits->vdd_mem_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_mem) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_mem);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_mem(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_HIGH;
		else
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
			vdd_dig = buffered_value;
	} else {
		vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_dig) >=
		MSM_RPMRS_VDD(limits->vdd_dig_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_dig) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_dig);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_dig(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

/******************************************************************************
 * Buffering Functions
 *****************************************************************************/

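/*
 * A level is only usable if every interrupt that might need to wake
 * the system stays detectable: when the level pulls vdd_dig down to
 * retention the MPM must monitor all enabled interrupts (irqs_detect);
 * otherwise, with PXO off, it must at least cover the enabled GPIO
 * interrupts (gpio_detect).
 */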
static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
		bool irqs_detect, bool gpio_detect)
{
	if (limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH)
		return irqs_detect;

	if (limits->pxo == MSM_RPMRS_PXO_OFF)
		return gpio_detect;

	return true;
}

static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
{
	return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
		(limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH);
}

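/*
 * Re-evaluate which power-collapse levels are reachable given the
 * buffered requests; called whenever the buffer or a resource's
 * enable_low_power setting changes.
 */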
static void msm_rpmrs_update_levels(void)
{
	int i, k;

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			continue;

		level->available = true;

		for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
			struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];

			if (rs->beyond_limits &&
					rs->beyond_limits(&level->rs_limits)) {
				level->available = false;
				break;
			}
		}
	}
}

/*
 * Return value:
 *   0: none of the entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = req[i].value;
		set_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, req[i].id, req[i].value);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

/*
 * Return value:
 *   0: none of the entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = 0;
		clear_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d\n", __func__, req[i].id);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

#ifdef CONFIG_MSM_L2_SPM
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	int rc = 0;
	int lpm;

	switch (limits->l2_cache) {
	case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		/* Increment the counter for TZ to init L2 on warmboot */
		/* Barrier in msm_spm_l2_set_low_power_mode */
		BUG_ON(!msm_rpmrs_l2_counter_addr);
		writel_relaxed(++msm_rpmrs_l2_reset_count,
				msm_rpmrs_l2_counter_addr);
		break;
	case MSM_RPMRS_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_RPMRS_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:
	case MSM_RPMRS_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
	if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
		pr_info("%s: Requesting low power mode %d returned %d\n",
				__func__, lpm, rc);

	return rc;
}
#else
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	return 0;
}
#endif

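/*
 * Fold every buffered request toward the chosen level's limits, send
 * the result to the RPM sleep set, then undo the aggregation so the
 * caller-visible buffer is untouched; on success only the listed IDs
 * stay marked as buffered.
 */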
static int msm_rpmrs_flush_buffer(
	uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
	struct msm_rpm_iv_pair *req;
	int count;
	int rc;
	int i;

	msm_rpmrs_aggregate_sclk(sclk_count);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->aggregate)
			msm_rpmrs_resources[i]->aggregate(limits);
	}

	count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
	if (!req) {
		rc = -ENOMEM;
		goto flush_buffer_restore;
	}

	count = 0;
	i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	while (i < MSM_RPM_ID_LAST + 1) {
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, i, msm_rpmrs_buffer[i]);

		req[count].id = i;
		req[count].value = msm_rpmrs_buffer[i];
		count++;

		i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST+1, i+1);
	}

	rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
	kfree(req);

	if (rc)
		goto flush_buffer_restore;

	bitmap_and(msm_rpmrs_buffered,
		msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);

flush_buffer_restore:
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->restore)
			msm_rpmrs_resources[i]->restore();
	}
	msm_rpmrs_restore_sclk();

	if (rc)
		pr_err("%s: failed: %d\n", __func__, rc);
	return rc;
}

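/*
 * Sleep-set requests are only buffered here; they reach the RPM when
 * msm_rpmrs_flush_buffer() runs on the way into sleep. Requests for
 * the other context sets go straight through to the RPM driver.
 */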
static int msm_rpmrs_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_buffer_request(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		return rc;
	}

	if (noirq)
		return msm_rpm_set_noirq(ctx, req, count);
	else
		return msm_rpm_set(ctx, req, count);
}

static int msm_rpmrs_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_clear_buffer(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		if (rc < 0)
			return rc;
	}

	if (noirq)
		return msm_rpm_clear_noirq(ctx, req, count);
	else
		return msm_rpm_clear(ctx, req, count);
}

/******************************************************************************
 * Attribute Functions
 *****************************************************************************/

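/*
 * Each resource's low-power vote is exposed as a sysfs file, e.g.
 * (path assuming this file is built in as rpm_resources):
 *
 *   echo 1 > /sys/module/rpm_resources/enable_low_power/pxo
 */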
static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strcat(buf, "\n");
		rc++;
	}

	return rc;
}

static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL) {
		struct msm_rpm_iv_pair req;
		req.id = MSM_RPMRS_ID_RPM_CTL;
		req.value = GET_RS_FROM_ATTR(attr)->enable_low_power ? 0 : 1;

		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request RPM_CTL to %d: %d\n",
				__func__, req.value, rc);
		}
	}

	msm_rpmrs_update_levels();
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return count;
}

static int __init msm_rpmrs_resource_sysfs_add(void)
{
	struct kobject *module_kobj;
	struct kobject *low_power_kobj;
	int rc;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kobj = kobject_create_and_add(
				"enable_low_power", module_kobj);
	if (!low_power_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(low_power_kobj, &msm_rpmrs_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

	rc = 0;

resource_sysfs_add_exit:
	return rc;
}

/******************************************************************************
 * Public Functions
 *****************************************************************************/

int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_set_common(ctx, req, count, false);
}

int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
	return msm_rpmrs_set_common(ctx, req, count, true);
}

int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_clear_common(ctx, req, count, false);
}

int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
	return msm_rpmrs_clear_common(ctx, req, count, true);
}

void msm_rpmrs_show_resources(void)
{
	struct msm_rpmrs_resource *rs;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id < MSM_RPM_ID_LAST + 1)
			pr_info("%s: resource %s: buffered %d, value 0x%x\n",
				__func__, rs->name,
				test_bit(rs->rs[0].id, msm_rpmrs_buffered),
				msm_rpmrs_buffer[rs->rs[0].id]);
		else
			pr_info("%s: resource %s: value %d\n",
				__func__, rs->name, rs->rs[0].value);
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}

struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
	uint32_t sleep_us)
{
	unsigned int cpu = smp_processor_id();
	struct msm_rpmrs_level *best_level = NULL;
	bool irqs_detectable = false;
	bool gpio_detectable = false;
	int i;

	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
	}

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
		uint32_t power;

		if (!level->available)
			continue;

		if (sleep_mode != level->sleep_mode)
			continue;

		if (latency_us < level->latency_us)
			continue;

		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
				irqs_detectable, gpio_detectable))
			continue;

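		/*
		 * Estimate average power: very short sleeps pay the full
		 * entry/exit energy; sleeps up to the break-even time
		 * amortize that energy over the sleep; sleeps more than
		 * ~1024x the break-even time are treated as steady state;
		 * anything in between blends overhead and steady-state
		 * power.
		 */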
		if (sleep_us <= 1) {
			power = level->energy_overhead;
		} else if (sleep_us <= level->time_overhead_us) {
			power = level->energy_overhead / sleep_us;
		} else if ((sleep_us >> 10) > level->time_overhead_us) {
			power = level->steady_state_power;
		} else {
			power = (sleep_us - level->time_overhead_us);
			power *= level->steady_state_power;
			power /= sleep_us;
			power += level->energy_overhead / sleep_us;
		}

		if (!best_level ||
				best_level->rs_limits.power[cpu] >= power) {
			level->rs_limits.latency_us[cpu] = level->latency_us;
			level->rs_limits.power[cpu] = power;
			best_level = level;
		}
	}

	return best_level ? &best_level->rs_limits : NULL;
}

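/*
 * Called on the way into sleep: program the L2 low-power mode first,
 * then, if the RPM is being notified, flush the buffered sleep set
 * and arm the MPM when the chosen level requires it.
 */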
int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
		bool from_idle, bool notify_rpm)
{
	int rc = 0;

	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
	if (rc)
		return rc;

	if (notify_rpm) {
		rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
		if (rc)
			return rc;

		if (msm_rpmrs_use_mpm(limits))
			msm_mpm_enter_sleep(from_idle);
	}

	return rc;
}

void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
		bool from_idle, bool notify_rpm)
{
	/* Disable L2 for now; we don't want L2 to do retention by default */
	msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);

	if (msm_rpmrs_use_mpm(limits))
		msm_mpm_exit_sleep(from_idle);
}

#ifdef CONFIG_MSM_L2_SPM
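/*
 * HSFS-open L2 power collapse is only safe once a single CPU remains
 * online, so track hotplug events and re-evaluate the sleep levels.
 */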
static int rpmrs_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_ONLINE:
		if (num_online_cpus() > 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_ACTIVE;
		break;
	case CPU_DEAD_FROZEN:
	case CPU_DEAD:
		if (num_online_cpus() == 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_HSFS_OPEN;
		break;
	}

	msm_rpmrs_update_levels();
	return NOTIFY_OK;
}

static struct notifier_block __refdata rpmrs_cpu_notifier = {
	.notifier_call = rpmrs_cpu_callback,
};
#endif

int __init msm_rpmrs_levels_init(struct msm_rpmrs_level *levels, int size)
{
	msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) * size,
			GFP_KERNEL);
	if (!msm_rpmrs_levels)
		return -ENOMEM;
	msm_rpmrs_level_count = size;
	memcpy(msm_rpmrs_levels, levels, size * sizeof(struct msm_rpmrs_level));

	return 0;
}

static int __init msm_rpmrs_init(void)
{
	struct msm_rpm_iv_pair req;
	int rc;

	if (cpu_is_apq8064())
		return -ENODEV;

	BUG_ON(!msm_rpmrs_levels);

	if (cpu_is_msm8x60()) {
		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 1;

		rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request L2 cache: %d\n",
				__func__, rc);
			goto init_exit;
		}

		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 0;

		rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
		if (rc) {
			pr_err("%s: failed to initialize L2 cache for sleep: "
				"%d\n", __func__, rc);
			goto init_exit;
		}
	}

	/* Enable RPM SWFI on Apps initialization */
	req.id = MSM_RPMRS_ID_RPM_CTL;
	req.value = 0;

	rc = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &req, 1);
	if (rc) {
		pr_err("%s: failed to initialize RPM halt: "
			"%d\n", __func__, rc);
		goto init_exit;
	}

	rc = msm_rpmrs_resource_sysfs_add();

init_exit:
	return rc;
}
device_initcall(msm_rpmrs_init);

static int __init msm_rpmrs_early_init(void)
{
	int i, k;

	/* Initialize listed bitmap for valid resource IDs */
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		for (k = 0; k < msm_rpmrs_resources[i]->size; k++)
			set_bit(msm_rpmrs_resources[i]->rs[k].id,
				msm_rpmrs_listed);
	}

	return 0;
}
early_initcall(msm_rpmrs_early_init);

#ifdef CONFIG_MSM_L2_SPM
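/*
 * The IMEM counter tells TZ how many times the L2 has been reset so it
 * can reinitialize the cache on warmboot; once the local L2 SPM owns
 * the mode, the RPM-buffered aggregate/restore hooks are disabled.
 */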
static int __init msm_rpmrs_l2_counter_init(void)
{
	msm_rpmrs_l2_counter_addr = MSM_IMEM_BASE + L2_PC_COUNTER_ADDR;
	writel_relaxed(msm_rpmrs_l2_reset_count, msm_rpmrs_l2_counter_addr);
	mb();

	msm_rpmrs_l2_cache.beyond_limits = msm_spm_l2_cache_beyond_limits;
	msm_rpmrs_l2_cache.aggregate = NULL;
	msm_rpmrs_l2_cache.restore = NULL;

	register_hotcpu_notifier(&rpmrs_cpu_notifier);

	return 0;
}
early_initcall(msm_rpmrs_l2_counter_init);
#endif