blob: bee3c3d796b94557a618e0b9bac53d1aa481456e [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/semaphore.h>
27#include <linux/spinlock.h>
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -060028#include <linux/device.h>
29#include <linux/platform_device.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include <asm/hardware/gic.h>
31#include <mach/msm_iomap.h>
32#include <mach/rpm.h>
33
34/******************************************************************************
35 * Data type and structure definitions
36 *****************************************************************************/
37
/* Descriptor for the single request that may be outstanding to the RPM.
 * Filled in by the sender; completed by the ack path. */
struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;	/* caller-supplied id/value pairs */
	int count;			/* number of pairs in <req> */
	uint32_t *ctx_mask_ack;		/* out: acknowledged context mask */
	uint32_t *sel_masks_ack;	/* out: acknowledged select masks */
	struct completion *done;	/* completed on ack; NULL in poll mode */
};

/* Notification configuration for one context set: the first
 * MSM_RPM_SEL_MASK_SIZE pairs are the "configured" registers, the
 * second half the "registered" registers (see the accessors below). */
struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[MSM_RPM_SEL_MASK_SIZE * 2];
};

#define configured_iv(notif_cfg) ((notif_cfg)->iv)
#define registered_iv(notif_cfg) ((notif_cfg)->iv + MSM_RPM_SEL_MASK_SIZE)

/* Platform data (register bases, IRQ numbers) handed to msm_rpm_init(). */
static struct msm_rpm_platform_data *msm_rpm_platform;
/* Maps each RPM resource id to its select; MSM_RPM_SEL_LAST + 1 marks
 * an unmapped id (see msm_rpm_populate_map()/msm_rpm_map_id_to_sel()). */
static uint32_t msm_rpm_map[MSM_RPM_ID_LAST + 1];

static DEFINE_MUTEX(msm_rpm_mutex);		/* serializes sleeping requests */
static DEFINE_SPINLOCK(msm_rpm_lock);		/* serializes noirq requests */
static DEFINE_SPINLOCK(msm_rpm_irq_lock);	/* protects request/notif state */

/* The outstanding request, or NULL; points at one of the two static
 * slots below.  Protected by <msm_rpm_irq_lock>. */
static struct msm_rpm_request *msm_rpm_request;
static struct msm_rpm_request msm_rpm_request_irq_mode;
static struct msm_rpm_request msm_rpm_request_poll_mode;

/* Registered listeners; walked by the ack IRQ path under
 * <msm_rpm_irq_lock>. */
static LIST_HEAD(msm_rpm_notifications);
static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
/* Set once the initial notification config has been pushed to the RPM. */
static bool msm_rpm_init_notif_done;
67
68/******************************************************************************
69 * Internal functions
70 *****************************************************************************/
71
72static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
73{
74 return __raw_readl(msm_rpm_platform->reg_base_addrs[page] + reg * 4);
75}
76
77static inline void msm_rpm_write(
78 unsigned int page, unsigned int reg, uint32_t value)
79{
80 __raw_writel(value, msm_rpm_platform->reg_base_addrs[page] + reg * 4);
81}
82
83static inline void msm_rpm_read_contiguous(
84 unsigned int page, unsigned int reg, uint32_t *values, int count)
85{
86 int i;
87
88 for (i = 0; i < count; i++)
89 values[i] = msm_rpm_read(page, reg + i);
90}
91
92static inline void msm_rpm_write_contiguous(
93 unsigned int page, unsigned int reg, uint32_t *values, int count)
94{
95 int i;
96
97 for (i = 0; i < count; i++)
98 msm_rpm_write(page, reg + i, values[i]);
99}
100
/* Zero <count> consecutive registers starting at <reg>. */
static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	unsigned int end = reg + count;

	while (reg < end)
		msm_rpm_write(page, reg++, 0);
}
109
110static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
111{
112 return (id > MSM_RPM_ID_LAST) ? MSM_RPM_SEL_LAST + 1 : msm_rpm_map[id];
113}
114
115/*
116 * Note: the function does not clear the masks before filling them.
117 *
118 * Return value:
119 * 0: success
120 * -EINVAL: invalid id in <req> array
121 */
122static int msm_rpm_fill_sel_masks(
123 uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
124{
125 uint32_t sel;
126 int i;
127
128 for (i = 0; i < count; i++) {
129 sel = msm_rpm_map_id_to_sel(req[i].id);
130
131 if (sel > MSM_RPM_SEL_LAST)
132 return -EINVAL;
133
134 sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
135 msm_rpm_get_sel_mask(sel);
136 }
137
138 return 0;
139}
140
/*
 * Ring the apps->RPM IPC doorbell to tell the RPM that a request has
 * been staged in the request page.
 */
static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_platform->msm_apps_ipc_rpm_val,
			msm_rpm_platform->msm_apps_ipc_rpm_reg);
}
146
/*
 * Read and dispatch whatever the RPM has posted in the ack page:
 * either a notification or the acknowledgement of the outstanding
 * request.
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0);
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_ACK_SEL_0, sel_masks_ack, MSM_RPM_SEL_MASK_SIZE);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		/* Wake each listener whose select masks overlap the
		 * acknowledged selects — at most once per listener. */
		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		/* Clear the ack page so the RPM can post the next ack. */
		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		/* Hand the acknowledged masks and the ack-page values
		 * for the requested ids back to the waiting sender. */
		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
						msm_rpm_request->req[i].id);

		/* Clear the ack page so the RPM can post the next ack. */
		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		/* <done> is NULL in poll mode — the poller notices
		 * msm_rpm_request going NULL instead. */
		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
211
212static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
213{
214 unsigned long flags;
215 int rc;
216
217 if (dev_id != &msm_rpm_ack_interrupt)
218 return IRQ_NONE;
219
220 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
221 rc = msm_rpm_process_ack_interrupt();
222 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
223
224 return IRQ_HANDLED;
225}
226
/*
 * Poll until the outstanding request (if any) has been acknowledged,
 * consuming the ack by hand since the ack IRQ is masked on this path.
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * allow_async_completion: when true, drop <msm_rpm_irq_lock> around the
 *   udelay so the IRQ handler (running elsewhere) may complete the
 *   request asynchronously while we wait.
 */
static void msm_rpm_busy_wait_for_request_completion(
	bool allow_async_completion)
{
	int rc;

	do {
		/* Spin until the ack interrupt is pending or the request
		 * has been completed behind our back. */
		while (!gic_is_spi_pending(msm_rpm_platform->irq_ack) &&
				msm_rpm_request) {
			if (allow_async_completion)
				spin_unlock(&msm_rpm_irq_lock);
			udelay(1);
			if (allow_async_completion)
				spin_lock(&msm_rpm_irq_lock);
		}

		if (!msm_rpm_request)
			break;	/* completed asynchronously */

		/* Consume the ack ourselves; a nonzero rc means it was a
		 * notification or spurious, so keep waiting. */
		rc = msm_rpm_process_ack_interrupt();
		gic_clear_spi_pending(msm_rpm_platform->irq_ack);
	} while (rc);
}
252
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Sends the request to the RPM and sleeps until the ack IRQ handler
 * completes it, so it must be called from task context.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	/* Stage the descriptor the ack IRQ handler will fill in. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* Only one request may be outstanding at a time. */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	/* Write the requested values into the request page. */
	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	/* Tell the RPM which selects and context this request targets. */
	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	/* The ack must echo our context mask (modulo the REJECTED bit)
	 * and our select masks exactly. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
308
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Polling variant of msm_rpm_set_exclusive(): masks the ack IRQ and
 * busy-waits for the acknowledgement, so it is usable with interrupts
 * disabled.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_platform->irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	/* done == NULL tells the ack path not to signal a completion —
	 * we poll for msm_rpm_request going NULL instead. */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	/* Mask the ack IRQ; acks are consumed by polling below. */
	irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));

	/* Let a request issued by the IRQ-mode path drain first. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	/* Write the requested values into the request page. */
	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* Same ack sanity checks as the IRQ-mode path. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
369
370/* Upon return, the <req> array will contain values from the ack page.
371 *
372 * Return value:
373 * 0: success
374 * -EINTR: interrupted
375 * -EINVAL: invalid <ctx> or invalid id in <req> array
376 * -ENOSPC: request rejected
377 */
378static int msm_rpm_set_common(
379 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
380{
381 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
382 int rc;
383
384 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
385 rc = -EINVAL;
386 goto set_common_exit;
387 }
388
389 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
390 if (rc)
391 goto set_common_exit;
392
393 if (noirq) {
394 unsigned long flags;
395
396 spin_lock_irqsave(&msm_rpm_lock, flags);
397 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
398 spin_unlock_irqrestore(&msm_rpm_lock, flags);
399 } else {
400 rc = mutex_lock_interruptible(&msm_rpm_mutex);
401 if (rc)
402 goto set_common_exit;
403
404 rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
405 mutex_unlock(&msm_rpm_mutex);
406 }
407
408set_common_exit:
409 return rc;
410}
411
/*
 * Clear the caller's resources in <ctx> by sending an INVALIDATE
 * request whose values are the select masks to be cleared.
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
	int rc;
	int i;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	/* The invalidate registers take the select masks to be cleared
	 * as their values. */
	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	/* The request itself selects only INVALIDATE. */
	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
		msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
			ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		/* A rejected invalidate is treated as a fatal protocol
		 * error. */
		BUG_ON(rc);
	} else {
		rc = mutex_lock_interruptible(&msm_rpm_mutex);
		if (rc)
			goto clear_common_exit;

		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		/* A rejected invalidate is treated as a fatal protocol
		 * error. */
		BUG_ON(rc);
	}

clear_common_exit:
	return rc;
}
465
466/*
467 * Note: assumes caller has acquired <msm_rpm_mutex>.
468 */
469static void msm_rpm_update_notification(uint32_t ctx,
470 struct msm_rpm_notif_config *curr_cfg,
471 struct msm_rpm_notif_config *new_cfg)
472{
473 if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
474 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
475 int rc;
476
477 sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)]
478 |= msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION);
479
480 rc = msm_rpm_set_exclusive(ctx,
481 sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
482 BUG_ON(rc);
483
484 memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
485 }
486}
487
488/*
489 * Note: assumes caller has acquired <msm_rpm_mutex>.
490 */
491static void msm_rpm_initialize_notification(void)
492{
493 struct msm_rpm_notif_config cfg;
494 unsigned int ctx;
495 int i;
496
497 for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
498 cfg = msm_rpm_notif_cfgs[ctx];
499
500 for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) {
501 configured_iv(&cfg)[i].id =
502 MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
503 configured_iv(&cfg)[i].value = ~0UL;
504
505 registered_iv(&cfg)[i].id =
506 MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
507 registered_iv(&cfg)[i].value = 0;
508 }
509
510 msm_rpm_update_notification(ctx,
511 &msm_rpm_notif_cfgs[ctx], &cfg);
512 }
513}
514
515/******************************************************************************
516 * Public functions
517 *****************************************************************************/
518
519int msm_rpm_local_request_is_outstanding(void)
520{
521 unsigned long flags;
522 int outstanding = 0;
523
524 if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
525 goto local_request_is_outstanding_exit;
526
527 if (!spin_trylock(&msm_rpm_irq_lock))
528 goto local_request_is_outstanding_unlock;
529
530 outstanding = (msm_rpm_request != NULL);
531 spin_unlock(&msm_rpm_irq_lock);
532
533local_request_is_outstanding_unlock:
534 spin_unlock_irqrestore(&msm_rpm_lock, flags);
535
536local_request_is_outstanding_exit:
537 return outstanding;
538}
539
540/*
541 * Read the specified status registers and return their values.
542 *
543 * status: array of id-value pairs. Each <id> specifies a status register,
544 * i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
545 * contain the value of the status register.
546 * count: number of id-value pairs in the array
547 *
548 * Return value:
549 * 0: success
550 * -EBUSY: RPM is updating the status page; values across different registers
551 * may not be consistent
552 * -EINVAL: invalid id in <status> array
553 */
554int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
555{
556 uint32_t seq_begin;
557 uint32_t seq_end;
558 int rc;
559 int i;
560
561 seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
562 MSM_RPM_STATUS_ID_SEQUENCE);
563
564 for (i = 0; i < count; i++) {
565 if (status[i].id > MSM_RPM_STATUS_ID_LAST) {
566 rc = -EINVAL;
567 goto get_status_exit;
568 }
569
570 status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
571 status[i].id);
572 }
573
574 seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
575 MSM_RPM_STATUS_ID_SEQUENCE);
576
577 rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;
578
579get_status_exit:
580 return rc;
581}
582EXPORT_SYMBOL(msm_rpm_get_status);
583
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 *   There are two contexts that a RPM driver client can use:
 *   MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values
 *   that are intended to take effect when the CPU is active,
 *   MSM_RPM_CTX_SET_0 should be used. For resource values that are
 *   intended to take effect when the CPU is not active,
 *   MSM_RPM_CTX_SET_SLEEP should be used.
 * req: array of id-value pairs. Each <id> specifies a RPM resource,
 *   i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested
 *   resource value.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 */
int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_set_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_set);
612
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function is similar to msm_rpm_set() except that it must be
 *   called with interrupts masked. If possible, use msm_rpm_set()
 *   instead, to maximize CPU throughput.
 */
int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpm_set or msm_rpm_set_nosleep instead.");
	return msm_rpm_set_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_set_noirq);
628
/*
 * Issue a resource request to RPM to clear resource values. Once the
 * values are cleared, the resources revert back to their default values
 * for this RPM master.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 * req: array of id-value pairs. Each <id> specifies a RPM resource,
 *   i.e, one of MSM_RPM_ID_xxxx. <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 */
int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_clear_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_clear);
651
/*
 * Issue a resource request to RPM to clear resource values.
 *
 * Note: the function is similar to msm_rpm_clear() except that it must be
 *   called with interrupts masked. If possible, use msm_rpm_clear()
 *   instead, to maximize CPU throughput.
 */
int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpm_clear or msm_rpm_clear_nosleep instead.");
	return msm_rpm_clear_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_clear_noirq);
667
/*
 * Register for RPM notification. When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered. Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notification object. Caller should initialize only the
 *    semaphore field. When a notification arrives later, the
 *    semaphore will be "up"ed.
 * req: array of id-value pairs. Each <id> specifies a status register,
 *    i.e, one of MSM_RPM_STATUS_ID_xxxx. <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid id in <req> array
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto register_notification_exit;

	/* Lazily push the initial notification configuration to the
	 * RPM on the first registration. */
	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	/* <msm_rpm_irq_lock> protects the list against the ack path,
	 * which walks it to deliver notifications. */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* OR this listener's selects into the registered set and push
	 * the updated configuration to the RPM if it changed. */
	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_register_notification);
731
/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notification object that was registered previously.
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto unregister_notification_exit;

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* Rebuild the registered selects from scratch ... */
	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value = 0;

	/* ... as the union over the remaining listeners.  Note that <n>
	 * is deliberately reused as the list iterator after removal. */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

unregister_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_unregister_notification);
775
/* RPM firmware version, read from the status page in msm_rpm_init(). */
static uint32_t fw_major, fw_minor, fw_build;

/* sysfs: version this driver was built against. */
static ssize_t driver_version_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
}

/* sysfs: firmware version reported by the RPM itself. */
static ssize_t fw_version_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		fw_major, fw_minor, fw_build);
}

static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version);
static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version);

static struct attribute *driver_attributes[] = {
	&driver_version_attr.attr,
	&fw_version_attr.attr,
	NULL
};

static struct attribute_group driver_attr_group = {
	.attrs = driver_attributes,
};
804
/* Create the version sysfs attributes under the platform device. */
static int __devinit msm_rpm_probe(struct platform_device *pdev)
{
	return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group);
}
809
/* Remove the version sysfs attributes created by msm_rpm_probe(). */
static int __devexit msm_rpm_remove(struct platform_device *pdev)
{
	sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group);
	return 0;
}
815
/* Platform driver exists solely to expose the sysfs version files. */
static struct platform_driver msm_rpm_platform_driver = {
	.probe = msm_rpm_probe,
	.remove = __devexit_p(msm_rpm_remove),
	.driver = {
		.name = "msm_rpm",
		.owner = THIS_MODULE,
	},
};
824
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825static void __init msm_rpm_populate_map(void)
826{
827 int i, k;
828
829 for (i = 0; i < ARRAY_SIZE(msm_rpm_map); i++)
830 msm_rpm_map[i] = MSM_RPM_SEL_LAST + 1;
831
832 for (i = 0; i < rpm_map_data_size; i++) {
833 struct msm_rpm_map_data *raw_data = &rpm_map_data[i];
834
835 for (k = 0; k < raw_data->count; k++)
836 msm_rpm_map[raw_data->id + k] = raw_data->sel;
837 }
838}
839
840int __init msm_rpm_init(struct msm_rpm_platform_data *data)
841{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700842 unsigned int irq;
843 int rc;
844
845 msm_rpm_platform = data;
846
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600847 fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700848 MSM_RPM_STATUS_ID_VERSION_MAJOR);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600849 fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700850 MSM_RPM_STATUS_ID_VERSION_MINOR);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600851 fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700852 MSM_RPM_STATUS_ID_VERSION_BUILD);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600853 pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
854 fw_major, fw_minor, fw_build);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600856 if (fw_major != RPM_MAJOR_VER) {
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600857 pr_err("%s: RPM version %u.%u.%u incompatible with "
858 "this driver version %u.%u.%u\n", __func__,
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600859 fw_major, fw_minor, fw_build,
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600860 RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
861 return -EFAULT;
862 }
863
864 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MAJOR,
865 RPM_MAJOR_VER);
866 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MINOR,
867 RPM_MINOR_VER);
868 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_BUILD,
869 RPM_BUILD_VER);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700870
871 irq = msm_rpm_platform->irq_ack;
872
873 rc = request_irq(irq, msm_rpm_ack_interrupt,
874 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
875 "rpm_drv", msm_rpm_ack_interrupt);
876 if (rc) {
877 pr_err("%s: failed to request irq %d: %d\n",
878 __func__, irq, rc);
879 return rc;
880 }
881
882 rc = irq_set_irq_wake(irq, 1);
883 if (rc) {
884 pr_err("%s: failed to set wakeup irq %u: %d\n",
885 __func__, irq, rc);
886 return rc;
887 }
888
889 msm_rpm_populate_map();
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600890
891 return platform_driver_register(&msm_rpm_platform_driver);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892}