/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <asm/hardware/gic.h>
#include <mach/msm_iomap.h>
#include <mach/rpm.h>

/******************************************************************************
 * Data type and structure definitions
 *****************************************************************************/

struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;
	int count;
	uint32_t *ctx_mask_ack;
	uint32_t *sel_masks_ack;
	struct completion *done;
};

struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[MSM_RPM_SEL_MASK_SIZE * 2];
};

#define configured_iv(notif_cfg) ((notif_cfg)->iv)
#define registered_iv(notif_cfg) ((notif_cfg)->iv + MSM_RPM_SEL_MASK_SIZE)
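
/*
 * <iv> packs two sets of id-value pairs back to back: the first
 * MSM_RPM_SEL_MASK_SIZE entries describe the "configured" notifications
 * and the remainder the "registered" ones; the macros above simply index
 * into the respective halves.
 */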

static struct msm_rpm_platform_data *msm_rpm_platform;
static uint32_t msm_rpm_map[MSM_RPM_ID_LAST + 1];

static DEFINE_MUTEX(msm_rpm_mutex);
static DEFINE_SPINLOCK(msm_rpm_lock);
static DEFINE_SPINLOCK(msm_rpm_irq_lock);

static struct msm_rpm_request *msm_rpm_request;
static struct msm_rpm_request msm_rpm_request_irq_mode;
static struct msm_rpm_request msm_rpm_request_poll_mode;

static LIST_HEAD(msm_rpm_notifications);
static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
static bool msm_rpm_init_notif_done;

/******************************************************************************
 * Internal functions
 *****************************************************************************/

static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
{
	return __raw_readl(msm_rpm_platform->reg_base_addrs[page] + reg * 4);
}

static inline void msm_rpm_write(
	unsigned int page, unsigned int reg, uint32_t value)
{
	__raw_writel(value, msm_rpm_platform->reg_base_addrs[page] + reg * 4);
}

static inline void msm_rpm_read_contiguous(
	unsigned int page, unsigned int reg, uint32_t *values, int count)
{
	int i;

	for (i = 0; i < count; i++)
		values[i] = msm_rpm_read(page, reg + i);
}

static inline void msm_rpm_write_contiguous(
	unsigned int page, unsigned int reg, uint32_t *values, int count)
{
	int i;

	for (i = 0; i < count; i++)
		msm_rpm_write(page, reg + i, values[i]);
}

static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	int i;

	for (i = 0; i < count; i++)
		msm_rpm_write(page, reg + i, 0);
}

static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
{
	return (id > MSM_RPM_ID_LAST) ? MSM_RPM_SEL_LAST + 1 : msm_rpm_map[id];
}

/*
 * Note: the function does not clear the masks before filling them.
 *
 * Return value:
 * 0: success
 * -EINVAL: invalid id in <req> array
 */
static int msm_rpm_fill_sel_masks(
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	uint32_t sel;
	int i;

	for (i = 0; i < count; i++) {
		sel = msm_rpm_map_id_to_sel(req[i].id);

		if (sel > MSM_RPM_SEL_LAST)
			return -EINVAL;

		sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
			msm_rpm_get_sel_mask(sel);
	}

	return 0;
}
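
/*
 * Illustrative sketch (not driver code): filling a select mask for a
 * single id. The helper names are the real ones used above; the id is a
 * hypothetical placeholder.
 *
 *	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
 *	struct msm_rpm_iv_pair req = { .id = MSM_RPM_ID_xxxx, .value = 0 };
 *
 *	if (msm_rpm_fill_sel_masks(sel_masks, &req, 1) == 0) {
 *		... sel_masks now carries the select bit for MSM_RPM_ID_xxxx,
 *		... placed by msm_rpm_get_sel_mask_reg() and
 *		... msm_rpm_get_sel_mask().
 *	}
 */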

static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_platform->msm_apps_ipc_rpm_val,
			msm_rpm_platform->msm_apps_ipc_rpm_reg);
}

/*
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 * 0: request acknowledgement
 * 1: notification
 * 2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0);
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_ACK_SEL_0, sel_masks_ack, MSM_RPM_SEL_MASK_SIZE);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
						msm_rpm_request->req[i].id);

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}

static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int rc;

	if (dev_id != &msm_rpm_ack_interrupt)
		return IRQ_NONE;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	rc = msm_rpm_process_ack_interrupt();
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 */
static void msm_rpm_busy_wait_for_request_completion(
	bool allow_async_completion)
{
	int rc;

	do {
		while (!gic_is_spi_pending(msm_rpm_platform->irq_ack) &&
				msm_rpm_request) {
			if (allow_async_completion)
				spin_unlock(&msm_rpm_irq_lock);
			udelay(1);
			if (allow_async_completion)
				spin_lock(&msm_rpm_irq_lock);
		}

		if (!msm_rpm_request)
			break;

		rc = msm_rpm_process_ack_interrupt();
		gic_clear_spi_pending(msm_rpm_platform->irq_ack);
	} while (rc);
}

/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 * 0: success
 * -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}

/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 * 0: success
 * -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_platform->irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));

	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}

/* Upon return, the <req> array will contain values from the ack page.
 *
 * Return value:
 * 0: success
 * -EINTR: interrupted
 * -EINVAL: invalid <ctx> or invalid id in <req> array
 * -ENOSPC: request rejected
 */
static int msm_rpm_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	int rc;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto set_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto set_common_exit;

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
	} else {
		rc = mutex_lock_interruptible(&msm_rpm_mutex);
		if (rc)
			goto set_common_exit;

		rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
		mutex_unlock(&msm_rpm_mutex);
	}

set_common_exit:
	return rc;
}

/*
 * Return value:
 * 0: success
 * -EINTR: interrupted
 * -EINVAL: invalid <ctx> or invalid id in <req> array
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
	int rc;
	int i;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
		msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
			ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		BUG_ON(rc);
	} else {
		rc = mutex_lock_interruptible(&msm_rpm_mutex);
		if (rc)
			goto clear_common_exit;

		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		BUG_ON(rc);
	}

clear_common_exit:
	return rc;
}

/*
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_update_notification(uint32_t ctx,
	struct msm_rpm_notif_config *curr_cfg,
	struct msm_rpm_notif_config *new_cfg)
{
	if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
		uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
		int rc;

		sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)]
			|= msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION);

		rc = msm_rpm_set_exclusive(ctx,
			sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
		BUG_ON(rc);

		memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
	}
}

/*
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_initialize_notification(void)
{
	struct msm_rpm_notif_config cfg;
	unsigned int ctx;
	int i;

	for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
		cfg = msm_rpm_notif_cfgs[ctx];

		for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) {
			configured_iv(&cfg)[i].id =
				MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
			configured_iv(&cfg)[i].value = ~0UL;

			registered_iv(&cfg)[i].id =
				MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
			registered_iv(&cfg)[i].value = 0;
		}

		msm_rpm_update_notification(ctx,
			&msm_rpm_notif_cfgs[ctx], &cfg);
	}
}

/******************************************************************************
 * Public functions
 *****************************************************************************/

int msm_rpm_local_request_is_outstanding(void)
{
	unsigned long flags;
	int outstanding = 0;

	if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
		goto local_request_is_outstanding_exit;

	if (!spin_trylock(&msm_rpm_irq_lock))
		goto local_request_is_outstanding_unlock;

	outstanding = (msm_rpm_request != NULL);
	spin_unlock(&msm_rpm_irq_lock);

local_request_is_outstanding_unlock:
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

local_request_is_outstanding_exit:
	return outstanding;
}

/*
 * Read the specified status registers and return their values.
 *
 * status: array of id-value pairs. Each <id> specifies a status register,
 *	i.e., one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
 *	contain the value of the status register.
 * count: number of id-value pairs in the array
 *
 * Return value:
 * 0: success
 * -EBUSY: RPM is updating the status page; values across different registers
 *	may not be consistent
 * -EINVAL: invalid id in <status> array
 */
int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
{
	uint32_t seq_begin;
	uint32_t seq_end;
	int rc;
	int i;

	seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			MSM_RPM_STATUS_ID_SEQUENCE);

	for (i = 0; i < count; i++) {
		if (status[i].id > MSM_RPM_STATUS_ID_LAST) {
			rc = -EINVAL;
			goto get_status_exit;
		}

		status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
				status[i].id);
	}

	seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			MSM_RPM_STATUS_ID_SEQUENCE);

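	/*
	 * The RPM bumps the sequence number when it starts and again when
	 * it finishes updating the status page, so an odd count, or a
	 * count that changed across the reads above, means the snapshot
	 * may be inconsistent.
	 */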
	rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;

get_status_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_get_status);
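
/*
 * Illustrative usage sketch (not part of the driver): poll a status
 * register, retrying while the RPM is mid-update. The status id shown is
 * one the driver itself reads during init.
 *
 *	struct msm_rpm_iv_pair s = { .id = MSM_RPM_STATUS_ID_VERSION_MAJOR };
 *	int rc;
 *
 *	do {
 *		rc = msm_rpm_get_status(&s, 1);
 *	} while (rc == -EBUSY);
 *
 *	if (!rc)
 *		pr_debug("RPM major version: %u\n", s.value);
 */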

/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 *	There are two contexts that an RPM driver client can use:
 *	MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values
 *	that are intended to take effect when the CPU is active,
 *	MSM_RPM_CTX_SET_0 should be used. For resource values that are
 *	intended to take effect when the CPU is not active,
 *	MSM_RPM_CTX_SET_SLEEP should be used.
 * req: array of id-value pairs. Each <id> specifies an RPM resource,
 *	i.e., one of MSM_RPM_ID_xxxx. Each <value> specifies the requested
 *	resource value.
 * count: number of id-value pairs in the array
 *
 * Return value:
 * 0: success
 * -EINTR: interrupted
 * -EINVAL: invalid <ctx> or invalid id in <req> array
 * -ENOSPC: request rejected
 */
int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_set_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_set);
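
/*
 * Illustrative usage sketch (not part of the driver); the resource id and
 * value are hypothetical placeholders:
 *
 *	struct msm_rpm_iv_pair req = {
 *		.id = MSM_RPM_ID_xxxx,
 *		.value = 1,
 *	};
 *	int rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
 *
 *	if (rc == -ENOSPC)
 *		pr_err("RPM rejected the request\n");
 */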

/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function is similar to msm_rpm_set() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_set()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpm_set or msm_rpm_set_nosleep instead.");
	return msm_rpm_set_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_set_noirq);

/*
 * Issue a resource request to RPM to clear resource values. Once the
 * values are cleared, the resources revert to their default values for
 * this RPM master.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 * req: array of id-value pairs. Each <id> specifies an RPM resource,
 *	i.e., one of MSM_RPM_ID_xxxx. <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 * 0: success
 * -EINTR: interrupted
 * -EINVAL: invalid <ctx> or invalid id in <req> array
 */
int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_clear_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_clear);

/*
 * Issue a resource request to RPM to clear resource values.
 *
 * Note: the function is similar to msm_rpm_clear() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_clear()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpm_clear or msm_rpm_clear_nosleep instead.");
	return msm_rpm_clear_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_clear_noirq);

/*
 * Register for RPM notification. When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered. Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notification object. Caller should initialize only the
 *	semaphore field. When a notification arrives later, the
 *	semaphore will be "up"ed.
 * req: array of id-value pairs. Each <id> specifies a status register,
 *	i.e., one of MSM_RPM_STATUS_ID_xxxx. <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 * 0: success
 * -EINTR: interrupted
 * -EINVAL: invalid id in <req> array
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto register_notification_exit;

	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_register_notification);
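
/*
 * Illustrative usage sketch (not part of the driver); the status id is a
 * hypothetical placeholder:
 *
 *	static struct msm_rpm_notification notif;
 *	struct msm_rpm_iv_pair watch = { .id = MSM_RPM_STATUS_ID_xxxx };
 *
 *	sema_init(&notif.sem, 0);
 *	if (!msm_rpm_register_notification(&notif, &watch, 1)) {
 *		down(&notif.sem);	... blocks until a watched change ...
 *		msm_rpm_unregister_notification(&notif);
 *	}
 */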

/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notification object that was registered previously.
 *
 * Return value:
 * 0: success
 * -EINTR: interrupted
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto unregister_notification_exit;

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value = 0;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

unregister_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_unregister_notification);

static void __init msm_rpm_populate_map(void)
{
	int i, k;

	for (i = 0; i < ARRAY_SIZE(msm_rpm_map); i++)
		msm_rpm_map[i] = MSM_RPM_SEL_LAST + 1;

	for (i = 0; i < rpm_map_data_size; i++) {
		struct msm_rpm_map_data *raw_data = &rpm_map_data[i];

		for (k = 0; k < raw_data->count; k++)
			msm_rpm_map[raw_data->id + k] = raw_data->sel;
	}
}

int __init msm_rpm_init(struct msm_rpm_platform_data *data)
{
	uint32_t major;
	uint32_t minor;
	uint32_t build;
	unsigned int irq;
	int rc;

	msm_rpm_platform = data;

	major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			MSM_RPM_STATUS_ID_VERSION_MAJOR);
	minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			MSM_RPM_STATUS_ID_VERSION_MINOR);
	build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			MSM_RPM_STATUS_ID_VERSION_BUILD);
	pr_info("%s: RPM firmware %u.%u.%u\n", __func__, major, minor, build);

	if (major != RPM_MAJOR_VER) {
		pr_err("%s: RPM version %u.%u.%u incompatible with "
			"this driver version %u.%u.%u\n", __func__,
			major, minor, build,
			RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
		return -EFAULT;
	}

	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MAJOR,
		RPM_MAJOR_VER);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MINOR,
		RPM_MINOR_VER);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_BUILD,
		RPM_BUILD_VER);

	irq = msm_rpm_platform->irq_ack;

	rc = request_irq(irq, msm_rpm_ack_interrupt,
		IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
		"rpm_drv", msm_rpm_ack_interrupt);
	if (rc) {
		pr_err("%s: failed to request irq %u: %d\n",
			__func__, irq, rc);
		return rc;
	}

	rc = irq_set_irq_wake(irq, 1);
	if (rc) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, irq, rc);
		return rc;
	}

	msm_rpm_populate_map();
	return 0;
}
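
/*
 * Illustrative board-file sketch (not part of the driver): every field
 * shown is inferred from its use above, and all addresses, IRQ numbers,
 * and the IPC trigger value are hypothetical, board-specific placeholders.
 * The data must stay resident because the driver keeps the pointer.
 *
 *	static struct msm_rpm_platform_data rpm_data = {
 *		.reg_base_addrs = {
 *			[MSM_RPM_PAGE_STATUS] = RPM_STATUS_VA,
 *			[MSM_RPM_PAGE_CTRL] = RPM_CTRL_VA,
 *			[MSM_RPM_PAGE_REQ] = RPM_REQ_VA,
 *			[MSM_RPM_PAGE_ACK] = RPM_ACK_VA,
 *		},
 *		.irq_ack = RPM_ACK_IRQ,
 *		.msm_apps_ipc_rpm_reg = APPS_IPC_RPM_REG_VA,
 *		.msm_apps_ipc_rpm_val = APPS_IPC_RPM_BIT,
 *	};
 *
 *	msm_rpm_init(&rpm_data);
 */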