blob: ef2956aa0b6ce96e5391419aa3f531321b831120 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/semaphore.h>
27#include <linux/spinlock.h>
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -060028#include <linux/device.h>
29#include <linux/platform_device.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include <asm/hardware/gic.h>
31#include <mach/msm_iomap.h>
32#include <mach/rpm.h>
33
34/******************************************************************************
35 * Data type and structure definitions
36 *****************************************************************************/
37
/*
 * Book-keeping for a single in-flight RPM request.  All pointer fields
 * reference caller-owned storage that the ack path fills in.
 */
struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;	/* id/value pairs sent to the RPM */
	int count;			/* number of entries in <req> */
	uint32_t *ctx_mask_ack;		/* out: acknowledged context mask */
	uint32_t *sel_masks_ack;	/* out: acknowledged selector masks */
	struct completion *done;	/* completed on ack; NULL in poll mode */
};
45
/*
 * Notification configuration for one context set.  The first
 * MSM_RPM_SEL_MASK_SIZE entries hold the "configured" masks and the
 * second half the "registered" masks; use the accessors below.
 */
struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[MSM_RPM_SEL_MASK_SIZE * 2];
};

#define configured_iv(notif_cfg) ((notif_cfg)->iv)
#define registered_iv(notif_cfg) ((notif_cfg)->iv + MSM_RPM_SEL_MASK_SIZE)
52
static struct msm_rpm_platform_data *msm_rpm_platform;
/* Maps each resource id to its selector; MSM_RPM_SEL_LAST + 1 = unmapped. */
static uint32_t msm_rpm_map[MSM_RPM_ID_LAST + 1];

/* Serializes task-context set/clear requests. */
static DEFINE_MUTEX(msm_rpm_mutex);
/* Outer lock taken around the request registers (see msm_rpm_set_exclusive). */
static DEFINE_SPINLOCK(msm_rpm_lock);
/* Protects <msm_rpm_request>, the ack registers and the notification list. */
static DEFINE_SPINLOCK(msm_rpm_irq_lock);

/* The single outstanding request, or NULL when none is in flight. */
static struct msm_rpm_request *msm_rpm_request;
static struct msm_rpm_request msm_rpm_request_irq_mode;
static struct msm_rpm_request msm_rpm_request_poll_mode;

static LIST_HEAD(msm_rpm_notifications);
static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
static bool msm_rpm_init_notif_done;
67
68/******************************************************************************
69 * Internal functions
70 *****************************************************************************/
71
72static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
73{
74 return __raw_readl(msm_rpm_platform->reg_base_addrs[page] + reg * 4);
75}
76
77static inline void msm_rpm_write(
78 unsigned int page, unsigned int reg, uint32_t value)
79{
80 __raw_writel(value, msm_rpm_platform->reg_base_addrs[page] + reg * 4);
81}
82
83static inline void msm_rpm_read_contiguous(
84 unsigned int page, unsigned int reg, uint32_t *values, int count)
85{
86 int i;
87
88 for (i = 0; i < count; i++)
89 values[i] = msm_rpm_read(page, reg + i);
90}
91
92static inline void msm_rpm_write_contiguous(
93 unsigned int page, unsigned int reg, uint32_t *values, int count)
94{
95 int i;
96
97 for (i = 0; i < count; i++)
98 msm_rpm_write(page, reg + i, values[i]);
99}
100
/* Zero <count> consecutive words starting at <reg>. */
static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	int idx;

	for (idx = count - 1; idx >= 0; idx--)
		msm_rpm_write(page, reg + idx, 0);
}
109
110static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
111{
112 return (id > MSM_RPM_ID_LAST) ? MSM_RPM_SEL_LAST + 1 : msm_rpm_map[id];
113}
114
115/*
116 * Note: the function does not clear the masks before filling them.
117 *
118 * Return value:
119 * 0: success
120 * -EINVAL: invalid id in <req> array
121 */
122static int msm_rpm_fill_sel_masks(
123 uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
124{
125 uint32_t sel;
126 int i;
127
128 for (i = 0; i < count; i++) {
129 sel = msm_rpm_map_id_to_sel(req[i].id);
130
131 if (sel > MSM_RPM_SEL_LAST)
132 return -EINVAL;
133
134 sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
135 msm_rpm_get_sel_mask(sel);
136 }
137
138 return 0;
139}
140
/* Ring the apps->RPM IPC doorbell to signal that a request is staged. */
static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_platform->msm_apps_ipc_rpm_val,
			msm_rpm_platform->msm_apps_ipc_rpm_reg);
}
146
/*
 * Read and dispatch whatever the RPM has acknowledged: either a
 * notification (semaphores of matching registrants are up'ed) or the
 * acknowledgement of the outstanding request (ack values are copied
 * into the caller-owned storage of <msm_rpm_request>).
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0);
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_ACK_SEL_0, sel_masks_ack, MSM_RPM_SEL_MASK_SIZE);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		/* Wake every registrant whose selector masks overlap. */
		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		/* Clear the ack registers so the RPM may post again. */
		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		/* Hand the ack back through the caller-owned pointers. */
		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		/* Requested values are echoed back on the ack page. */
		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
						msm_rpm_request->req[i].id);

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
211
212static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
213{
214 unsigned long flags;
215 int rc;
216
217 if (dev_id != &msm_rpm_ack_interrupt)
218 return IRQ_NONE;
219
220 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
221 rc = msm_rpm_process_ack_interrupt();
222 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
223
224 return IRQ_HANDLED;
225}
226
/*
 * Poll until the outstanding request (if any) is acknowledged, handling
 * the ack by hand rather than via the (masked) interrupt.
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * allow_async_completion: when true, the irq lock is dropped around the
 * udelay() so a concurrent ack-interrupt handler may complete the
 * request asynchronously instead.
 */
static void msm_rpm_busy_wait_for_request_completion(
	bool allow_async_completion)
{
	int rc;

	do {
		while (!gic_is_spi_pending(msm_rpm_platform->irq_ack) &&
				msm_rpm_request) {
			if (allow_async_completion)
				spin_unlock(&msm_rpm_irq_lock);
			udelay(1);
			if (allow_async_completion)
				spin_lock(&msm_rpm_irq_lock);
		}

		/* Request was completed asynchronously while we slept. */
		if (!msm_rpm_request)
			break;

		rc = msm_rpm_process_ack_interrupt();
		gic_clear_spi_pending(msm_rpm_platform->irq_ack);
	} while (rc);	/* non-zero: notification/spurious; keep waiting */
}
252
/* Stage one request in the RPM request page, ring the doorbell, and
 * sleep on a completion until the ack-interrupt handler reports back.
 *
 * Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	/* Point the shared request record at our on-stack ack storage. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* Only one request may be outstanding at a time. */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	/* Completed by msm_rpm_process_ack_interrupt() from IRQ context. */
	wait_for_completion(&ack);

	/* The ack context must match the request, modulo the REJECTED bit. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
308
/* Issue one request with the ack interrupt masked, busy-polling for the
 * acknowledgement instead of sleeping.  Safe with local irqs disabled.
 *
 * Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_platform->irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	struct irq_chip *irq_chip = NULL;
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	/* No completion: the ack is consumed by polling, not by the IRQ. */
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		/* NOTE(review): -ENOSPC is an odd code for a missing
		 * irq_chip, but callers only test for non-zero. */
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	/* Mask the ack IRQ so the interrupt handler cannot race us. */
	irq_chip->irq_mask(irq_get_irq_data(irq));

	/* Drain any request the IRQ path still has in flight. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* The ack context must match the request, modulo the REJECTED bit. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
375
376/* Upon return, the <req> array will contain values from the ack page.
377 *
378 * Return value:
379 * 0: success
380 * -EINTR: interrupted
381 * -EINVAL: invalid <ctx> or invalid id in <req> array
382 * -ENOSPC: request rejected
383 */
384static int msm_rpm_set_common(
385 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
386{
387 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
388 int rc;
389
390 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
391 rc = -EINVAL;
392 goto set_common_exit;
393 }
394
395 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
396 if (rc)
397 goto set_common_exit;
398
399 if (noirq) {
400 unsigned long flags;
401
402 spin_lock_irqsave(&msm_rpm_lock, flags);
403 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
404 spin_unlock_irqrestore(&msm_rpm_lock, flags);
405 } else {
406 rc = mutex_lock_interruptible(&msm_rpm_mutex);
407 if (rc)
408 goto set_common_exit;
409
410 rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
411 mutex_unlock(&msm_rpm_mutex);
412 }
413
414set_common_exit:
415 return rc;
416}
417
418/*
419 * Return value:
420 * 0: success
421 * -EINTR: interrupted
422 * -EINVAL: invalid <ctx> or invalid id in <req> array
423 */
424static int msm_rpm_clear_common(
425 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
426{
427 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
428 struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
429 int rc;
430 int i;
431
432 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
433 rc = -EINVAL;
434 goto clear_common_exit;
435 }
436
437 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
438 if (rc)
439 goto clear_common_exit;
440
441 for (i = 0; i < ARRAY_SIZE(r); i++) {
442 r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
443 r[i].value = sel_masks[i];
444 }
445
446 memset(sel_masks, 0, sizeof(sel_masks));
447 sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
448 msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);
449
450 if (noirq) {
451 unsigned long flags;
452
453 spin_lock_irqsave(&msm_rpm_lock, flags);
454 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
455 ARRAY_SIZE(r));
456 spin_unlock_irqrestore(&msm_rpm_lock, flags);
457 BUG_ON(rc);
458 } else {
459 rc = mutex_lock_interruptible(&msm_rpm_mutex);
460 if (rc)
461 goto clear_common_exit;
462
463 rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
464 mutex_unlock(&msm_rpm_mutex);
465 BUG_ON(rc);
466 }
467
468clear_common_exit:
469 return rc;
470}
471
472/*
473 * Note: assumes caller has acquired <msm_rpm_mutex>.
474 */
475static void msm_rpm_update_notification(uint32_t ctx,
476 struct msm_rpm_notif_config *curr_cfg,
477 struct msm_rpm_notif_config *new_cfg)
478{
479 if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
480 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
481 int rc;
482
483 sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)]
484 |= msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION);
485
486 rc = msm_rpm_set_exclusive(ctx,
487 sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
488 BUG_ON(rc);
489
490 memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
491 }
492}
493
494/*
495 * Note: assumes caller has acquired <msm_rpm_mutex>.
496 */
497static void msm_rpm_initialize_notification(void)
498{
499 struct msm_rpm_notif_config cfg;
500 unsigned int ctx;
501 int i;
502
503 for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
504 cfg = msm_rpm_notif_cfgs[ctx];
505
506 for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) {
507 configured_iv(&cfg)[i].id =
508 MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
509 configured_iv(&cfg)[i].value = ~0UL;
510
511 registered_iv(&cfg)[i].id =
512 MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
513 registered_iv(&cfg)[i].value = 0;
514 }
515
516 msm_rpm_update_notification(ctx,
517 &msm_rpm_notif_cfgs[ctx], &cfg);
518 }
519}
520
521/******************************************************************************
522 * Public functions
523 *****************************************************************************/
524
525int msm_rpm_local_request_is_outstanding(void)
526{
527 unsigned long flags;
528 int outstanding = 0;
529
530 if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
531 goto local_request_is_outstanding_exit;
532
533 if (!spin_trylock(&msm_rpm_irq_lock))
534 goto local_request_is_outstanding_unlock;
535
536 outstanding = (msm_rpm_request != NULL);
537 spin_unlock(&msm_rpm_irq_lock);
538
539local_request_is_outstanding_unlock:
540 spin_unlock_irqrestore(&msm_rpm_lock, flags);
541
542local_request_is_outstanding_exit:
543 return outstanding;
544}
545
546/*
547 * Read the specified status registers and return their values.
548 *
549 * status: array of id-value pairs. Each <id> specifies a status register,
550 * i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
551 * contain the value of the status register.
552 * count: number of id-value pairs in the array
553 *
554 * Return value:
555 * 0: success
556 * -EBUSY: RPM is updating the status page; values across different registers
557 * may not be consistent
558 * -EINVAL: invalid id in <status> array
559 */
560int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
561{
562 uint32_t seq_begin;
563 uint32_t seq_end;
564 int rc;
565 int i;
566
567 seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
568 MSM_RPM_STATUS_ID_SEQUENCE);
569
570 for (i = 0; i < count; i++) {
571 if (status[i].id > MSM_RPM_STATUS_ID_LAST) {
572 rc = -EINVAL;
573 goto get_status_exit;
574 }
575
576 status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
577 status[i].id);
578 }
579
580 seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
581 MSM_RPM_STATUS_ID_SEQUENCE);
582
583 rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;
584
585get_status_exit:
586 return rc;
587}
588EXPORT_SYMBOL(msm_rpm_get_status);
589
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 *	There two contexts that a RPM driver client can use:
 *	MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values
 *	that are intended to take effect when the CPU is active,
 *	MSM_RPM_CTX_SET_0 should be used. For resource values that are
 *	intended to take effect when the CPU is not active,
 *	MSM_RPM_CTX_SET_SLEEP should be used.
 * req: array of id-value pairs. Each <id> specifies a RPM resource,
 *	i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested
 *	resource value.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 */
int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_set_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_set);
618
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function is similar to msm_rpm_set() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_set()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpm_set or msm_rpm_set_nosleep instead.");
	return msm_rpm_set_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_set_noirq);
634
/*
 * Issue a resource request to RPM to clear resource values.  Once the
 * values are cleared, the resources revert back to their default values
 * for this RPM master.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 * req: array of id-value pairs. Each <id> specifies a RPM resource,
 *	i.e, one of MSM_RPM_ID_xxxx. <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 */
int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_clear_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_clear);
657
/*
 * Issue a resource request to RPM to clear resource values.
 *
 * Note: the function is similar to msm_rpm_clear() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_clear()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpm_clear or msm_rpm_clear_nosleep instead.");
	return msm_rpm_clear_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_clear_noirq);
673
/*
 * Register for RPM notification.  When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered.  Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notifcation object.  Caller should initialize only the
 *	semaphore field.  When a notification arrives later, the
 *	semaphore will be "up"ed.
 * req: array of id-value pairs.  Each <id> specifies a status register,
 *	i.e, one of MSM_RPM_STATUS_ID_xxxx.  <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid id in <req> array
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto register_notification_exit;

	/* Lazily reset the RPM-side configuration on first registration. */
	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	/* The irq lock guards the list against the ack-interrupt handler. */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* Merge this registrant's selectors into the registered masks. */
	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_register_notification);
737
/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notifcation object that was registered previously.
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto unregister_notification_exit;

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* Rebuild the registered masks from scratch ... */
	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value = 0;

	/* ... from the selectors of every remaining registrant. */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

unregister_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_unregister_notification);
781
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600782static uint32_t fw_major, fw_minor, fw_build;
783
/* sysfs: report the driver's compiled-in RPM interface version. */
static ssize_t driver_version_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
			RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
}
790
/* sysfs: report the RPM firmware version cached at init time. */
static ssize_t fw_version_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
			fw_major, fw_minor, fw_build);
}
797
/* Read-only sysfs attributes exposed under the platform device. */
static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version);
static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version);

static struct attribute *driver_attributes[] = {
	&driver_version_attr.attr,
	&fw_version_attr.attr,
	NULL
};

static struct attribute_group driver_attr_group = {
	.attrs = driver_attributes,
};
810
/* Create the version attribute group under the device's sysfs directory. */
static int __devinit msm_rpm_probe(struct platform_device *pdev)
{
	return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group);
}
815
/* Tear down the sysfs attribute group created in msm_rpm_probe(). */
static int __devexit msm_rpm_remove(struct platform_device *pdev)
{
	sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group);
	return 0;
}
821
/* Platform driver binding for the "msm_rpm" device (sysfs only). */
static struct platform_driver msm_rpm_platform_driver = {
	.probe = msm_rpm_probe,
	.remove = __devexit_p(msm_rpm_remove),
	.driver = {
		.name = "msm_rpm",
		.owner = THIS_MODULE,
	},
};
830
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831static void __init msm_rpm_populate_map(void)
832{
833 int i, k;
834
835 for (i = 0; i < ARRAY_SIZE(msm_rpm_map); i++)
836 msm_rpm_map[i] = MSM_RPM_SEL_LAST + 1;
837
838 for (i = 0; i < rpm_map_data_size; i++) {
839 struct msm_rpm_map_data *raw_data = &rpm_map_data[i];
840
841 for (k = 0; k < raw_data->count; k++)
842 msm_rpm_map[raw_data->id + k] = raw_data->sel;
843 }
844}
845
846int __init msm_rpm_init(struct msm_rpm_platform_data *data)
847{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700848 unsigned int irq;
849 int rc;
850
851 msm_rpm_platform = data;
852
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600853 fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 MSM_RPM_STATUS_ID_VERSION_MAJOR);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600855 fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700856 MSM_RPM_STATUS_ID_VERSION_MINOR);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600857 fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700858 MSM_RPM_STATUS_ID_VERSION_BUILD);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600859 pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
860 fw_major, fw_minor, fw_build);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700861
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600862 if (fw_major != RPM_MAJOR_VER) {
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600863 pr_err("%s: RPM version %u.%u.%u incompatible with "
864 "this driver version %u.%u.%u\n", __func__,
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600865 fw_major, fw_minor, fw_build,
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600866 RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
867 return -EFAULT;
868 }
869
870 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MAJOR,
871 RPM_MAJOR_VER);
872 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MINOR,
873 RPM_MINOR_VER);
874 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_BUILD,
875 RPM_BUILD_VER);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876
877 irq = msm_rpm_platform->irq_ack;
878
879 rc = request_irq(irq, msm_rpm_ack_interrupt,
880 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
881 "rpm_drv", msm_rpm_ack_interrupt);
882 if (rc) {
883 pr_err("%s: failed to request irq %d: %d\n",
884 __func__, irq, rc);
885 return rc;
886 }
887
888 rc = irq_set_irq_wake(irq, 1);
889 if (rc) {
890 pr_err("%s: failed to set wakeup irq %u: %d\n",
891 __func__, irq, rc);
892 return rc;
893 }
894
895 msm_rpm_populate_map();
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600896
897 return platform_driver_register(&msm_rpm_platform_driver);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898}