blob: a8d787af434b00a8da7704ac63355e83fe953a04 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/semaphore.h>
27#include <linux/spinlock.h>
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -060028#include <linux/device.h>
29#include <linux/platform_device.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include <asm/hardware/gic.h>
31#include <mach/msm_iomap.h>
32#include <mach/rpm.h>
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -070033#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
35/******************************************************************************
36 * Data type and structure definitions
37 *****************************************************************************/
38
/*
 * Book-keeping for one outstanding RPM request.  The ack path
 * (msm_rpm_process_ack_interrupt) fills in the ack fields, reads the
 * resource values back from the ack page into <req>, completes <done>
 * (when non-NULL) and then clears the global <msm_rpm_request> pointer.
 */
struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;	/* id/value pairs sent; values are
					 * overwritten from the ack page */
	int count;			/* number of entries in <req> */
	uint32_t *ctx_mask_ack;		/* out: acknowledged context mask */
	uint32_t *sel_masks_ack;	/* out: acknowledged select masks */
	struct completion *done;	/* completed on ack; NULL in poll mode */
};
46
/*
 * Notification configuration for one context set.  iv[] holds the
 * "configured" id/value pairs in its first MSM_RPM_SEL_MASK_SIZE entries
 * and the "registered" pairs in the second half; see the
 * configured_iv()/registered_iv() accessor macros below.
 */
struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[MSM_RPM_SEL_MASK_SIZE * 2];
};
50
/* Accessors for the two halves of a msm_rpm_notif_config's iv[] array:
 * first half = "configured" pairs, second half = "registered" pairs. */
#define configured_iv(notif_cfg) ((notif_cfg)->iv)
#define registered_iv(notif_cfg) ((notif_cfg)->iv + MSM_RPM_SEL_MASK_SIZE)

/* Board-supplied data (register bases, ack IRQ, IPC register); NULL until
 * msm_rpm_init() has run. */
static struct msm_rpm_platform_data *msm_rpm_platform;
/* Resource id -> select value; MSM_RPM_SEL_LAST + 1 marks an invalid id
 * (populated by msm_rpm_populate_map()). */
static uint32_t msm_rpm_map[MSM_RPM_ID_LAST + 1];

/* Serializes sleeping (interrupt-mode) requests. */
static DEFINE_MUTEX(msm_rpm_mutex);
/* Guards issuing of requests across both request modes. */
static DEFINE_SPINLOCK(msm_rpm_lock);
/* Guards <msm_rpm_request>, the ack registers and the notification list. */
static DEFINE_SPINLOCK(msm_rpm_irq_lock);

/* The single outstanding request, or NULL when idle. */
static struct msm_rpm_request *msm_rpm_request;
static struct msm_rpm_request msm_rpm_request_irq_mode;
static struct msm_rpm_request msm_rpm_request_poll_mode;

/* Registered notification listeners and per-set notification config. */
static LIST_HEAD(msm_rpm_notifications);
static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
static bool msm_rpm_init_notif_done;	/* lazily set on first registration */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068/******************************************************************************
69 * Internal functions
70 *****************************************************************************/
71
72static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
73{
74 return __raw_readl(msm_rpm_platform->reg_base_addrs[page] + reg * 4);
75}
76
77static inline void msm_rpm_write(
78 unsigned int page, unsigned int reg, uint32_t value)
79{
80 __raw_writel(value, msm_rpm_platform->reg_base_addrs[page] + reg * 4);
81}
82
83static inline void msm_rpm_read_contiguous(
84 unsigned int page, unsigned int reg, uint32_t *values, int count)
85{
86 int i;
87
88 for (i = 0; i < count; i++)
89 values[i] = msm_rpm_read(page, reg + i);
90}
91
92static inline void msm_rpm_write_contiguous(
93 unsigned int page, unsigned int reg, uint32_t *values, int count)
94{
95 int i;
96
97 for (i = 0; i < count; i++)
98 msm_rpm_write(page, reg + i, values[i]);
99}
100
/* Zero <count> consecutive words starting at <reg>. */
static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	unsigned int r;

	for (r = reg; r < reg + count; r++)
		msm_rpm_write(page, r, 0);
}
109
110static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
111{
112 return (id > MSM_RPM_ID_LAST) ? MSM_RPM_SEL_LAST + 1 : msm_rpm_map[id];
113}
114
115/*
116 * Note: the function does not clear the masks before filling them.
117 *
118 * Return value:
119 * 0: success
120 * -EINVAL: invalid id in <req> array
121 */
122static int msm_rpm_fill_sel_masks(
123 uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
124{
125 uint32_t sel;
126 int i;
127
128 for (i = 0; i < count; i++) {
129 sel = msm_rpm_map_id_to_sel(req[i].id);
130
131 if (sel > MSM_RPM_SEL_LAST)
132 return -EINVAL;
133
134 sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
135 msm_rpm_get_sel_mask(sel);
136 }
137
138 return 0;
139}
140
/* Ring the apps->RPM IPC doorbell to tell the RPM a request is staged. */
static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_platform->msm_apps_ipc_rpm_val,
			msm_rpm_platform->msm_apps_ipc_rpm_reg);
}
146
/*
 * Service one acknowledgement from the RPM: either a notification
 * broadcast or the ack for the currently outstanding request.
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0);
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_ACK_SEL_0, sel_masks_ack, MSM_RPM_SEL_MASK_SIZE);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		/* Wake each listener whose select masks overlap the ack'ed
		 * selects; at most one up() per notification object. */
		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		/* Clear the ack registers so the RPM may post the next ack. */
		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		/* Hand the ack'ed context/select masks back to the waiter
		 * and read the resulting resource values from the ack page. */
		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
						msm_rpm_request->req[i].id);

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE);
		msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0);
		/* Ensure the write is complete before return */
		mb();

		/* <done> is NULL in poll mode; only irq mode waits on it. */
		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
211
212static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
213{
214 unsigned long flags;
215 int rc;
216
217 if (dev_id != &msm_rpm_ack_interrupt)
218 return IRQ_NONE;
219
220 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
221 rc = msm_rpm_process_ack_interrupt();
222 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
223
224 return IRQ_HANDLED;
225}
226
/*
 * Poll until the outstanding RPM request completes, servicing the ack
 * by hand (used when the ack IRQ is masked).
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * allow_async_completion: when true, <msm_rpm_irq_lock> is dropped
 * around each delay so the real interrupt handler (or another CPU) may
 * complete the request asynchronously while we wait.
 */
static void msm_rpm_busy_wait_for_request_completion(
	bool allow_async_completion)
{
	int rc;

	do {
		/* Spin until the ack IRQ is pending or the request has
		 * already been completed by someone else. */
		while (!gic_is_spi_pending(msm_rpm_platform->irq_ack) &&
				msm_rpm_request) {
			if (allow_async_completion)
				spin_unlock(&msm_rpm_irq_lock);
			udelay(1);
			if (allow_async_completion)
				spin_lock(&msm_rpm_irq_lock);
		}

		if (!msm_rpm_request)
			break;

		/* rc != 0 means a notification or spurious ack was
		 * consumed; keep looping until our request is ack'ed. */
		rc = msm_rpm_process_ack_interrupt();
		gic_clear_spi_pending(msm_rpm_platform->irq_ack);
	} while (rc);
}
252
/* Issue <req> in interrupt (completion) mode and sleep until the ack
 * arrives.  Upon return, the <req> array will contain values from the
 * ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	/* Stack-allocated ack targets are safe: we do not return until
	 * the interrupt handler has completed <ack>. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	/* Lock order: msm_rpm_lock, then msm_rpm_irq_lock. */
	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	/* The ack must echo our context mask, modulo the REJECTED bit,
	 * and echo our select masks exactly. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
308
/* Issue <req> in polling mode with the ack IRQ masked, busy-waiting for
 * the acknowledgement.  Upon return, the <req> array will contain values
 * from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected (also returned when the ack IRQ has no
 *            irq_chip, i.e. the IRQ layer is not set up)
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_platform->irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	struct irq_chip *irq_chip = NULL;
	int i;

	/* Poll mode: done == NULL tells the ack path not to complete(). */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	/* Mask the ack IRQ so only this function consumes the ack. */
	irq_chip->irq_mask(irq_get_irq_data(irq));

	/* An interrupt-mode request may still be in flight; let it finish
	 * (dropping the lock while waiting) before taking the slot. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* The ack must echo our context mask, modulo the REJECTED bit,
	 * and echo our select masks exactly. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
375
376/* Upon return, the <req> array will contain values from the ack page.
377 *
378 * Return value:
379 * 0: success
380 * -EINTR: interrupted
381 * -EINVAL: invalid <ctx> or invalid id in <req> array
382 * -ENOSPC: request rejected
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700383 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384 */
385static int msm_rpm_set_common(
386 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
387{
388 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
389 int rc;
390
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700391 if (!msm_rpm_platform) {
392 if (cpu_is_apq8064())
393 return 0;
394 else
395 return -ENODEV;
396 }
397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
399 rc = -EINVAL;
400 goto set_common_exit;
401 }
402
403 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
404 if (rc)
405 goto set_common_exit;
406
407 if (noirq) {
408 unsigned long flags;
409
410 spin_lock_irqsave(&msm_rpm_lock, flags);
411 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
412 spin_unlock_irqrestore(&msm_rpm_lock, flags);
413 } else {
414 rc = mutex_lock_interruptible(&msm_rpm_mutex);
415 if (rc)
416 goto set_common_exit;
417
418 rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
419 mutex_unlock(&msm_rpm_mutex);
420 }
421
422set_common_exit:
423 return rc;
424}
425
426/*
427 * Return value:
428 * 0: success
429 * -EINTR: interrupted
430 * -EINVAL: invalid <ctx> or invalid id in <req> array
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700431 * -ENODEV: RPM driver not initialized.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432 */
433static int msm_rpm_clear_common(
434 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
435{
436 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
437 struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
438 int rc;
439 int i;
440
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700441 if (!msm_rpm_platform) {
442 if (cpu_is_apq8064())
443 return 0;
444 else
445 return -ENODEV;
446 }
447
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
449 rc = -EINVAL;
450 goto clear_common_exit;
451 }
452
453 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
454 if (rc)
455 goto clear_common_exit;
456
457 for (i = 0; i < ARRAY_SIZE(r); i++) {
458 r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
459 r[i].value = sel_masks[i];
460 }
461
462 memset(sel_masks, 0, sizeof(sel_masks));
463 sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
464 msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);
465
466 if (noirq) {
467 unsigned long flags;
468
469 spin_lock_irqsave(&msm_rpm_lock, flags);
470 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
471 ARRAY_SIZE(r));
472 spin_unlock_irqrestore(&msm_rpm_lock, flags);
473 BUG_ON(rc);
474 } else {
475 rc = mutex_lock_interruptible(&msm_rpm_mutex);
476 if (rc)
477 goto clear_common_exit;
478
479 rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
480 mutex_unlock(&msm_rpm_mutex);
481 BUG_ON(rc);
482 }
483
484clear_common_exit:
485 return rc;
486}
487
488/*
489 * Note: assumes caller has acquired <msm_rpm_mutex>.
490 */
491static void msm_rpm_update_notification(uint32_t ctx,
492 struct msm_rpm_notif_config *curr_cfg,
493 struct msm_rpm_notif_config *new_cfg)
494{
495 if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
496 uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
497 int rc;
498
499 sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)]
500 |= msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION);
501
502 rc = msm_rpm_set_exclusive(ctx,
503 sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
504 BUG_ON(rc);
505
506 memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
507 }
508}
509
510/*
511 * Note: assumes caller has acquired <msm_rpm_mutex>.
512 */
513static void msm_rpm_initialize_notification(void)
514{
515 struct msm_rpm_notif_config cfg;
516 unsigned int ctx;
517 int i;
518
519 for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
520 cfg = msm_rpm_notif_cfgs[ctx];
521
522 for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) {
523 configured_iv(&cfg)[i].id =
524 MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
525 configured_iv(&cfg)[i].value = ~0UL;
526
527 registered_iv(&cfg)[i].id =
528 MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
529 registered_iv(&cfg)[i].value = 0;
530 }
531
532 msm_rpm_update_notification(ctx,
533 &msm_rpm_notif_cfgs[ctx], &cfg);
534 }
535}
536
537/******************************************************************************
538 * Public functions
539 *****************************************************************************/
540
541int msm_rpm_local_request_is_outstanding(void)
542{
543 unsigned long flags;
544 int outstanding = 0;
545
546 if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
547 goto local_request_is_outstanding_exit;
548
549 if (!spin_trylock(&msm_rpm_irq_lock))
550 goto local_request_is_outstanding_unlock;
551
552 outstanding = (msm_rpm_request != NULL);
553 spin_unlock(&msm_rpm_irq_lock);
554
555local_request_is_outstanding_unlock:
556 spin_unlock_irqrestore(&msm_rpm_lock, flags);
557
558local_request_is_outstanding_exit:
559 return outstanding;
560}
561
562/*
563 * Read the specified status registers and return their values.
564 *
565 * status: array of id-value pairs. Each <id> specifies a status register,
566 * i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
567 * contain the value of the status register.
568 * count: number of id-value pairs in the array
569 *
570 * Return value:
571 * 0: success
572 * -EBUSY: RPM is updating the status page; values across different registers
573 * may not be consistent
574 * -EINVAL: invalid id in <status> array
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700575 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700576 */
577int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
578{
579 uint32_t seq_begin;
580 uint32_t seq_end;
581 int rc;
582 int i;
583
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700584 if (!msm_rpm_platform) {
585 if (cpu_is_apq8064())
586 return 0;
587 else
588 return -ENODEV;
589 }
590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700591 seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
592 MSM_RPM_STATUS_ID_SEQUENCE);
593
594 for (i = 0; i < count; i++) {
595 if (status[i].id > MSM_RPM_STATUS_ID_LAST) {
596 rc = -EINVAL;
597 goto get_status_exit;
598 }
599
600 status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
601 status[i].id);
602 }
603
604 seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
605 MSM_RPM_STATUS_ID_SEQUENCE);
606
607 rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;
608
609get_status_exit:
610 return rc;
611}
612EXPORT_SYMBOL(msm_rpm_get_status);
613
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 *      There are two contexts that a RPM driver client can use:
 *      MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP.  For resource values
 *      that are intended to take effect when the CPU is active,
 *      MSM_RPM_CTX_SET_0 should be used.  For resource values that are
 *      intended to take effect when the CPU is not active,
 *      MSM_RPM_CTX_SET_SLEEP should be used.
 * req: array of id-value pairs.  Each <id> specifies a RPM resource,
 *      i.e, one of MSM_RPM_ID_xxxx.  Each <value> specifies the requested
 *      resource value.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_set_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_set);
643
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function is similar to msm_rpm_set() except that it must be
 * called with interrupts masked (it busy-waits on the ack).  If possible,
 * use msm_rpm_set() instead, to maximize CPU throughput.
 */
int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpm_set or msm_rpm_set_nosleep instead.");
	return msm_rpm_set_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_set_noirq);
659
/*
 * Issue a resource request to RPM to clear resource values.  Once the
 * values are cleared, the resources revert back to their default values
 * for this RPM master.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 * req: array of id-value pairs.  Each <id> specifies a RPM resource,
 *      i.e, one of MSM_RPM_ID_xxxx.  <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_clear_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_clear);
682
/*
 * Issue a resource request to RPM to clear resource values.
 *
 * Note: the function is similar to msm_rpm_clear() except that it must be
 * called with interrupts masked (it busy-waits on the ack).  If possible,
 * use msm_rpm_clear() instead, to maximize CPU throughput.
 */
int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpm_clear or msm_rpm_clear_nosleep instead.");
	return msm_rpm_clear_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_clear_noirq);
698
/*
 * Register for RPM notification.  When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered.  Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notification object.  Caller should initialize only the
 *    semaphore field.  When a notification arrives later, the
 *    semaphore will be "up"ed.
 * req: array of id-value pairs.  Each <id> specifies a status register,
 *      i.e, one of MSM_RPM_STATUS_ID_xxxx.  <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -EINVAL: invalid id in <req> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	/* APQ8064 has no RPM driven by this driver: silently succeed. */
	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto register_notification_exit;

	/* First registration ever: reset the RPM-side notification config. */
	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	/* The list is walked by the ack interrupt handler, hence the
	 * irq-safe spinlock around the insertion. */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* Merge this listener's selects into the registered masks and push
	 * the updated config to the RPM. */
	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_register_notification);
770
/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notification object that was registered previously.
 *
 * Return value:
 *   0: success
 *   -EINTR: interrupted
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	/* APQ8064 has no RPM driven by this driver: silently succeed. */
	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	rc = mutex_lock_interruptible(&msm_rpm_mutex);
	if (rc)
		goto unregister_notification_exit;

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* Rebuild the registered masks from scratch... */
	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value = 0;

	/* ...then OR in every remaining listener.  NOTE: <n> is reused as
	 * the list iterator after list_del(), which is safe because the
	 * caller's object is no longer needed. */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

unregister_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_unregister_notification);
822
/* RPM firmware version, read from the status page by msm_rpm_init(). */
static uint32_t fw_major, fw_minor, fw_build;
824
/* sysfs: report the RPM protocol version this driver was built against. */
static ssize_t driver_version_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
}
831
/* sysfs: report the RPM firmware version read at init time. */
static ssize_t fw_version_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		fw_major, fw_minor, fw_build);
}
838
/* Read-only sysfs attributes exposed under the msm_rpm platform device. */
static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version);
static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version);

static struct attribute *driver_attributes[] = {
	&driver_version_attr.attr,
	&fw_version_attr.attr,
	NULL
};

static struct attribute_group driver_attr_group = {
	.attrs = driver_attributes,
};
851
/* Create the version sysfs attributes under the platform device. */
static int __devinit msm_rpm_probe(struct platform_device *pdev)
{
	return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group);
}
856
/* Tear down the version sysfs attributes created in msm_rpm_probe(). */
static int __devexit msm_rpm_remove(struct platform_device *pdev)
{
	sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group);
	return 0;
}
862
/* Platform driver registered by msm_rpm_init(); only provides sysfs. */
static struct platform_driver msm_rpm_platform_driver = {
	.probe = msm_rpm_probe,
	.remove = __devexit_p(msm_rpm_remove),
	.driver = {
		.name = "msm_rpm",
		.owner = THIS_MODULE,
	},
};
871
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700872static void __init msm_rpm_populate_map(void)
873{
874 int i, k;
875
876 for (i = 0; i < ARRAY_SIZE(msm_rpm_map); i++)
877 msm_rpm_map[i] = MSM_RPM_SEL_LAST + 1;
878
879 for (i = 0; i < rpm_map_data_size; i++) {
880 struct msm_rpm_map_data *raw_data = &rpm_map_data[i];
881
882 for (k = 0; k < raw_data->count; k++)
883 msm_rpm_map[raw_data->id + k] = raw_data->sel;
884 }
885}
886
887int __init msm_rpm_init(struct msm_rpm_platform_data *data)
888{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700889 unsigned int irq;
890 int rc;
891
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700892 if (cpu_is_apq8064())
893 return 0;
894
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700895 msm_rpm_platform = data;
896
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600897 fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898 MSM_RPM_STATUS_ID_VERSION_MAJOR);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600899 fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700900 MSM_RPM_STATUS_ID_VERSION_MINOR);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600901 fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700902 MSM_RPM_STATUS_ID_VERSION_BUILD);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600903 pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
904 fw_major, fw_minor, fw_build);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700905
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600906 if (fw_major != RPM_MAJOR_VER) {
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600907 pr_err("%s: RPM version %u.%u.%u incompatible with "
908 "this driver version %u.%u.%u\n", __func__,
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600909 fw_major, fw_minor, fw_build,
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600910 RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
911 return -EFAULT;
912 }
913
914 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MAJOR,
915 RPM_MAJOR_VER);
916 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MINOR,
917 RPM_MINOR_VER);
918 msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_BUILD,
919 RPM_BUILD_VER);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920
921 irq = msm_rpm_platform->irq_ack;
922
923 rc = request_irq(irq, msm_rpm_ack_interrupt,
924 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
925 "rpm_drv", msm_rpm_ack_interrupt);
926 if (rc) {
927 pr_err("%s: failed to request irq %d: %d\n",
928 __func__, irq, rc);
929 return rc;
930 }
931
932 rc = irq_set_irq_wake(irq, 1);
933 if (rc) {
934 pr_err("%s: failed to set wakeup irq %u: %d\n",
935 __func__, irq, rc);
936 return rc;
937 }
938
939 msm_rpm_populate_map();
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600940
941 return platform_driver_register(&msm_rpm_platform_driver);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700942}