/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#include <mach/irqs.h>

#include "msm_cpr.h"

#define MODULE_NAME "msm-cpr"

/* Need platform device handle for suspend and resume APIs */
static struct platform_device *cpr_pdev;

static bool enable = true;
module_param(enable, bool, 0644);
MODULE_PARM_DESC(enable, "CPR Enable");
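
/*
 * Illustrative usage note (the exact parameter prefix depends on the build
 * and is an assumption here): with this file built into the kernel the
 * parameter is normally exposed as "msm_cpr.enable", so CPR can be kept off
 * by passing msm_cpr.enable=0 on the kernel command line or by writing 0 to
 * /sys/module/msm_cpr/parameters/enable before the driver probes.
 */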

struct msm_cpr {
	int curr_osc;			/* ring oscillator used for measurements */
	int cpr_mode;			/* current operating mode (NORMAL/TURBO) */
	int prev_mode;			/* mode before the last switch */
	uint32_t floor;			/* lower VDD limit for RBIF_LIMIT */
	uint32_t ceiling;		/* upper VDD limit for RBIF_LIMIT */
	void __iomem *base;		/* ioremapped CPR register base */
	unsigned int irq;		/* CPR interrupt line */
	struct mutex cpr_mutex;		/* serializes enable/disable updates */
	struct regulator *vreg_cx;	/* regulator for the CPR-managed rail */
	const struct msm_cpr_config *config;	/* platform configuration */
	struct notifier_block freq_transition;	/* cpufreq notifier */
	struct msm_cpr_vp_data *vp;	/* PMIC voltage step data */
};

/* Need to maintain state data for suspend and resume APIs */
static struct msm_cpr_reg cpr_save_state;

static inline
void cpr_write_reg(struct msm_cpr *cpr, u32 offset, u32 value)
{
	writel_relaxed(value, cpr->base + offset);
}

static inline u32 cpr_read_reg(struct msm_cpr *cpr, u32 offset)
{
	return readl_relaxed(cpr->base + offset);
}

static
void cpr_modify_reg(struct msm_cpr *cpr, u32 offset, u32 mask, u32 value)
{
	u32 reg_val;

	reg_val = readl_relaxed(cpr->base + offset);
	reg_val &= ~mask;
	reg_val |= value;
	writel_relaxed(reg_val, cpr->base + offset);
}

#ifdef DEBUG
static void cpr_regs_dump_all(struct msm_cpr *cpr)
{
	pr_debug("RBCPR_GCNT_TARGET(%d): 0x%x\n",
		cpr->curr_osc, readl_relaxed(cpr->base +
		RBCPR_GCNT_TARGET(cpr->curr_osc)));
	pr_debug("RBCPR_TIMER_INTERVAL: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_TIMER_INTERVAL));
	pr_debug("RBIF_TIMER_ADJUST: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_TIMER_ADJUST));
	pr_debug("RBIF_LIMIT: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_LIMIT));
	pr_debug("RBCPR_STEP_QUOT: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_STEP_QUOT));
	pr_debug("RBIF_SW_VLEVEL: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_SW_VLEVEL));
	pr_debug("RBCPR_DEBUG1: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_DEBUG1));
	pr_debug("RBCPR_RESULT_0: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_RESULT_0));
	pr_debug("RBCPR_RESULT_1: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_RESULT_1));
	pr_debug("RBCPR_QUOT_AVG: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_QUOT_AVG));
	pr_debug("RBCPR_CTL: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_CTL));
	pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
		cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
	pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
		cpr_read_reg(cpr, RBIF_IRQ_STATUS));
}
#endif

/* Enable the CPR H/W Block */
static void cpr_enable(struct msm_cpr *cpr)
{
	mutex_lock(&cpr->cpr_mutex);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
	mutex_unlock(&cpr->cpr_mutex);
}

/* Disable the CPR H/W Block */
static void cpr_disable(struct msm_cpr *cpr)
{
	mutex_lock(&cpr->cpr_mutex);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
	mutex_unlock(&cpr->cpr_mutex);
}

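/*
 * Note on the two poll helpers below: readl_poll_timeout() from
 * <linux/iopoll.h> re-reads the register roughly every 10 us and gives up
 * after 1000 us, returning a negative error (typically -ETIMEDOUT), so the
 * callers treat any non-zero return as a timeout.
 */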
static int32_t cpr_poll_result(struct msm_cpr *cpr)
{
	uint32_t val = 0;
	int rc = 0;

	rc = readl_poll_timeout(cpr->base + RBCPR_RESULT_0, val, ~val & BUSY_M,
				10, 1000);
	if (rc)
		pr_info("%s: RBCPR_RESULT_0 read error: %d\n",
			__func__, rc);
	return rc;
}

static int32_t cpr_poll_result_done(struct msm_cpr *cpr)
{
	uint32_t val = 0;
	int rc = 0;

	rc = readl_poll_timeout(cpr->base + RBIF_IRQ_STATUS, val, val & 0x1,
				10, 1000);
	if (rc)
		pr_info("%s: RBIF_IRQ_STATUS read error: %d\n",
			__func__, rc);
	return rc;
}

static void
cpr_2pt_kv_analysis(struct msm_cpr *cpr, struct msm_cpr_mode *chip_data)
{
	int32_t tgt_volt_mV = 0, level_uV, rc;
	uint32_t quot1, quot2;

	/**
	 * 2 Point KV Analysis to calculate Step Quot.
	 * STEP_QUOT is the number of QUOT units per PMIC step:
	 * STEP_QUOT = (quot1 - quot2) / 4
	 *
	 * The step quot is calculated once for every mode and stored for
	 * later use.
	 */
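	/*
	 * Worked example with illustrative numbers (not measured data): if
	 * the higher-voltage measurement reports quot1 = 1500 and the
	 * lower-voltage measurement reports quot2 = 1440, then
	 * STEP_QUOT = (1500 - 1440) / 4 = 15 QUOT units per PMIC step.
	 */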
	if (chip_data->step_quot != ~0)
		goto out_2pt_kv;

	/**
	 * Using the value from chip_data->tgt_volt_offset, calculate the
	 * new PMIC-adjusted voltage and set the PMIC to provide it.
	 *
	 * The default voltage is assumed to be the highest safe boot-up
	 * voltage, so the offset is always subtracted from it.
	 */
	if (chip_data->tgt_volt_offset > 0) {
		tgt_volt_mV = chip_data->calibrated_mV -
			(chip_data->tgt_volt_offset * cpr->vp->step_size);
	}
	pr_debug("tgt_volt_mV = %d, calibrated_mV = %d\n", tgt_volt_mV,
		chip_data->calibrated_mV);

	/* level_uV = tgt_volt_mV * 1000; */
	level_uV = 1350000;
	/* Call the PMIC specific routine to set the voltage */
	rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
	if (rc) {
		pr_err("%s: Initial voltage set at %duV failed. %d\n",
			__func__, level_uV, rc);
		return;
	}
	rc = regulator_enable(cpr->vreg_cx);
	if (rc) {
		pr_err("failed to enable %s, rc=%d\n", "vdd_cx", rc);
		return;
	}

	/* Store the adjusted value of voltage */
	chip_data->calibrated_mV = 1300;

	/* Take first CPR measurement at a higher voltage to get QUOT1 */

	/* Enable the Software mode of operation */
	cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);

	/* Enable the cpr measurement */
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

	/* IRQ is already disabled */
	rc = cpr_poll_result_done(cpr);
	if (rc) {
		pr_err("%s: Quot1: Exiting due to INT_DONE poll timeout\n",
			__func__);
		return;
	}

	rc = cpr_poll_result(cpr);
	if (rc) {
		pr_err("%s: Quot1: Exiting due to BUSY poll timeout\n",
			__func__);
		return;
	}

	quot1 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;

	/* Take second CPR measurement at a lower voltage to get QUOT2 */
	level_uV = 1300000;

	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
	/* Call the PMIC specific routine to set the voltage */
	rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
	if (rc) {
		pr_err("%s: Voltage set at %duV failed. %d\n",
			__func__, level_uV, rc);
		return;
	}

	cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

	/* cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1); */
	rc = cpr_poll_result_done(cpr);
	if (rc) {
		pr_err("%s: Quot2: Exiting due to INT_DONE poll timeout\n",
			__func__);
		goto err_poll_result_done;
	}
	/* IRQ is already disabled */
	rc = cpr_poll_result(cpr);
	if (rc) {
		pr_err("%s: Quot2: Exiting due to BUSY poll timeout\n",
			__func__);
		goto err_poll_result;
	}
	quot2 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
	chip_data->step_quot = (quot1 - quot2) / 4;
	pr_debug("%s: Calculated Step Quot is %d\n",
		__func__, chip_data->step_quot);
	/* Disable the cpr */
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);

out_2pt_kv:
	/* Program the step quot */
	cpr_write_reg(cpr, RBCPR_STEP_QUOT, (chip_data->step_quot & 0xFF));
	return;
err_poll_result:
err_poll_result_done:
	regulator_disable(cpr->vreg_cx);
}

static inline
void cpr_irq_clr_and_ack(struct msm_cpr *cpr, uint32_t mask)
{
	/* Clear the interrupt */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

static inline
void cpr_irq_clr_and_nack(struct msm_cpr *cpr, uint32_t mask)
{
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
	cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
}

static void cpr_irq_set(struct msm_cpr *cpr, uint32_t irq, bool enable)
{
	uint32_t irq_enabled;

	irq_enabled = cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
	if (enable)
		irq_enabled |= irq;
	else
		irq_enabled &= ~irq;
	cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
			INT_MASK, irq_enabled);
}

static void
cpr_up_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
	int rc, set_volt_mV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/**
	 * FIXME: Need to handle a potential race condition between
	 * freq switch handler and CPR interrupt handler here
	 */
	/* Set the new PMIC voltage */
	set_volt_mV = (new_volt < chip_data->Vmax ? new_volt
				: chip_data->Vmax);
	rc = regulator_set_voltage(cpr->vreg_cx, set_volt_mV * 1000,
					set_volt_mV * 1000);
	if (rc) {
		pr_err("%s: Voltage set at %dmV failed. %d\n",
			__func__, set_volt_mV, rc);
		cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
		return;
	}
	pr_debug("%s: Voltage set at %dmV\n", __func__, set_volt_mV);

	/**
	 * Save the new calibrated voltage to be re-used
	 * whenever we return to the same mode after a mode switch.
	 */
	chip_data->calibrated_mV = set_volt_mV;

	/* Clear all the interrupts */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);

	/* Disable Auto ACK for Down interrupts */
	cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_NACK_DN_EN_M, 0);

	/* Re-enable the down interrupt to the App; it may have been disabled
	 * if CPR hit Vmin earlier. The voltage is now set above Vmin.
	 */
	cpr_irq_set(cpr, DOWN_INT, 1);

	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

static void
cpr_dn_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
	int rc, set_volt_mV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/**
	 * FIXME: Need to handle a potential race condition between
	 * freq switch handler and CPR interrupt handler here
	 */
	/* Set the new PMIC voltage */
	set_volt_mV = (new_volt > chip_data->Vmin ? new_volt
				: chip_data->Vmin);
	rc = regulator_set_voltage(cpr->vreg_cx, set_volt_mV * 1000,
					set_volt_mV * 1000);
	if (rc) {
		pr_err("%s: Voltage set at %dmV failed. %d\n",
			__func__, set_volt_mV, rc);
		cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
		return;
	}
	pr_debug("%s: Voltage set at %dmV\n", __func__, set_volt_mV);

	/**
	 * Save the new calibrated voltage to be re-used
	 * whenever we return to the same mode after a mode switch.
	 */
	chip_data->calibrated_mV = set_volt_mV;

	/* Clear all the interrupts */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);

	if (new_volt <= chip_data->Vmin) {
		/*
		 * Disable the down interrupt to the App after we hit Vmin.
		 * It will be re-enabled when we service an up interrupt.
		 *
		 * A race condition between freq switch handler and CPR
		 * interrupt handler is possible. So, do not disable the
		 * interrupt if a freq switch already caused a mode
		 * change, since we need this interrupt in the new mode.
		 */
		if (cpr->cpr_mode == cpr->prev_mode) {
			/* Enable Auto ACK for CPR Down Flags
			 * while DOWN_INT to App is disabled */
			cpr_modify_reg(cpr, RBCPR_CTL,
					SW_AUTO_CONT_NACK_DN_EN_M,
					SW_AUTO_CONT_NACK_DN_EN);
			cpr_irq_set(cpr, DOWN_INT, 0);
			pr_debug("%s: DOWN_INT disabled\n", __func__);
		}
	}
	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

static void cpr_set_vdd(struct msm_cpr *cpr, enum cpr_action action)
{
	uint32_t curr_volt, new_volt, error_step;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
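	/*
	 * Per the shift and mask below, the error step count is read as a
	 * 4-bit field from bits [5:2] of RBCPR_RESULT_0.
	 */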
	error_step = cpr_read_reg(cpr, RBCPR_RESULT_0) >> 2;
	error_step &= 0xF;
	curr_volt = chip_data->calibrated_mV;

	if (action == UP) {
		/**
		 * Using the up margin in the comparison helps avoid having
		 * to change the up threshold values in the chip register.
		 */
		if (error_step < (cpr->config->up_threshold +
					cpr->config->up_margin)) {
			/* FIXME: Avoid repeated dn interrupts if we are here */
			pr_debug("UP_INT error step too small to set\n");
			cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
			return;
		}

		/* Calculate the new PMIC voltage */
		new_volt = curr_volt + (error_step * cpr->vp->step_size);
		pr_debug("UP_INT: new_volt: %d\n", new_volt);
		cpr_up_event_handler(cpr, new_volt);

	} else if (action == DOWN) {
		/**
		 * Using the down margin in the comparison helps avoid having
		 * to change the down threshold values in the chip register.
		 */
		if (error_step < (cpr->config->dn_threshold +
					cpr->config->dn_margin)) {
			/* FIXME: Avoid repeated dn interrupts if we are here */
			pr_debug("DOWN_INT error_step too small to set\n");
			cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
			return;
		}

		/* Calculate the new PMIC voltage */
		new_volt = curr_volt - (error_step * cpr->vp->step_size);
		pr_debug("DOWN_INT: new_volt: %d\n", new_volt);
		cpr_dn_event_handler(cpr, new_volt);
	}
}

static irqreturn_t cpr_irq0_handler(int irq, void *dev_id)
{
	struct msm_cpr *cpr = dev_id;
	uint32_t reg_val, ctl_reg;

	reg_val = cpr_read_reg(cpr, RBIF_IRQ_STATUS);
	ctl_reg = cpr_read_reg(cpr, RBCPR_CTL);

	/* Following sequence of handling is as per each IRQ's priority */
	if (reg_val & BIT(4)) {
		pr_debug("CPR: IRQ %d occurred for UP Flag\n", irq);
		cpr_set_vdd(cpr, UP);

	} else if ((reg_val & BIT(2)) && !(ctl_reg & SW_AUTO_CONT_NACK_DN_EN)) {
		pr_debug("CPR: IRQ %d occurred for Down Flag\n", irq);
		cpr_set_vdd(cpr, DOWN);

	} else if (reg_val & BIT(1)) {
		pr_debug("CPR: IRQ %d occurred for Min Flag\n", irq);
		cpr_irq_clr_and_nack(cpr, BIT(1) | BIT(0));

	} else if (reg_val & BIT(5)) {
		pr_debug("CPR: IRQ %d occurred for MAX Flag\n", irq);
		cpr_irq_clr_and_nack(cpr, BIT(5) | BIT(0));

	} else if (reg_val & BIT(3)) {
		/* SW_AUTO_CONT_ACK_EN is enabled */
		pr_debug("CPR: IRQ %d occurred for Mid Flag\n", irq);
	}
	return IRQ_HANDLED;
}

static void cpr_config(struct msm_cpr *cpr)
{
	uint32_t delay_count, cnt = 0, tmp_uV;
	int rc;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/* Program the SW vlevel */
	cpr_modify_reg(cpr, RBIF_SW_VLEVEL, SW_VLEVEL_M,
			cpr->config->sw_vlevel);

	/* Set the floor and ceiling values */
	cpr->floor = cpr->config->floor;
	cpr->ceiling = cpr->config->ceiling;

	/* Program the Ceiling & Floor values */
	cpr_modify_reg(cpr, RBIF_LIMIT, (CEILING_M | FLOOR_M),
			((cpr->ceiling << 6) | cpr->floor));

	/* Program the Up and Down Threshold values */
	cpr_modify_reg(cpr, RBCPR_CTL, UP_THRESHOLD_M | DN_THRESHOLD_M,
			cpr->config->up_threshold << 24 |
			cpr->config->dn_threshold << 28);

	cpr->curr_osc = chip_data->ring_osc;

	/**
	 * Program the gate count and target values
	 * for all the ring oscillators
	 */
	while (cnt < NUM_OSC) {
		cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cnt),
				(GCNT_M | TARGET_M),
				(chip_data->ring_osc_data[cnt].gcnt << 12 |
				chip_data->ring_osc_data[cnt].target_count));
		pr_debug("RBCPR_GCNT_TARGET(%d): = 0x%x\n", cnt,
			readl_relaxed(cpr->base + RBCPR_GCNT_TARGET(cnt)));
		cnt++;
	}

	/* Configure the step quot */
	cpr_2pt_kv_analysis(cpr, chip_data);

	/**
	 * Call the PMIC specific routine to set the voltage.
	 * Set it one extra step higher, since characterization data
	 * shows this helps.
	 */
	chip_data->calibrated_mV += cpr->vp->step_size;
	tmp_uV = chip_data->calibrated_mV * 1000;
	rc = regulator_set_voltage(cpr->vreg_cx, tmp_uV, tmp_uV);
	if (rc)
		pr_err("%s: Voltage set failed %d\n", __func__, rc);

	/* Program the Timer for the default delay between CPR measurements */
	delay_count = 0xFFFF;
	cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL, delay_count);

	/* Enable the Timer */
	cpr_modify_reg(cpr, RBCPR_CTL, TIMER_M, ENABLE_TIMER);

	/* Enable Auto ACK for Mid interrupts */
	cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_ACK_EN_M,
			SW_AUTO_CONT_ACK_EN);
}

static void cpr_mode_config(struct msm_cpr *cpr, enum cpr_mode mode)
{
	if (cpr->cpr_mode == mode)
		return;

	cpr->cpr_mode = mode;
	pr_debug("%s: Switching to %s mode\n", __func__,
		(mode == TURBO_MODE ? "TURBO" : "NORMAL"));

	/* Configure the new mode */
	cpr_config(cpr);
}

static int
cpr_freq_transition(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct msm_cpr *cpr = container_of(nb, struct msm_cpr,
						freq_transition);
	struct cpufreq_freqs *freqs = data;

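	/*
	 * Note: both cases below return before doing any work, so the CPR
	 * disable/re-enable sequence around a frequency switch that follows
	 * each early return is currently unreachable.
	 */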
	switch (val) {
	case CPUFREQ_PRECHANGE:
		return 0;
		pr_debug("pre freq change notification to cpr\n");

		disable_irq(cpr->irq);
		cpr_disable(cpr);
		cpr->prev_mode = cpr->cpr_mode;
		break;
	case CPUFREQ_POSTCHANGE:
		return 0;
		pr_debug("post freq change notification to cpr\n");

		if (freqs->new >= cpr->config->nom_freq_limit)
			cpr_mode_config(cpr, TURBO_MODE);
		else
			cpr_mode_config(cpr, NORMAL_MODE);
		/**
		 * Enable all interrupts. One of them could be in a disabled
		 * state if vdd had hit Vmax / Vmin earlier
		 */
		cpr_irq_set(cpr, (UP_INT | DOWN_INT), 1);

		enable_irq(cpr->irq);

		cpr_enable(cpr);

		break;
	default:
		break;
	}
	return 0;
}

#ifdef CONFIG_PM
static int msm_cpr_resume(struct device *dev)
{
	struct msm_cpr *cpr = dev_get_drvdata(dev);
	int osc_num = cpr->config->cpr_mode_data->ring_osc;

	cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL,
		cpr_save_state.rbif_timer_interval);
	cpr_write_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
		cpr_save_state.rbif_int_en);
	cpr_write_reg(cpr, RBIF_LIMIT,
		cpr_save_state.rbif_limit);
	cpr_write_reg(cpr, RBIF_TIMER_ADJUST,
		cpr_save_state.rbif_timer_adjust);
	cpr_write_reg(cpr, RBCPR_GCNT_TARGET(osc_num),
		cpr_save_state.rbcpr_gcnt_target);
	cpr_write_reg(cpr, RBCPR_STEP_QUOT,
		cpr_save_state.rbcpr_step_quot);
	cpr_write_reg(cpr, RBIF_SW_VLEVEL,
		cpr_save_state.rbif_sw_level);

	cpr_enable(cpr);
	cpr_write_reg(cpr, RBCPR_CTL,
		cpr_save_state.rbcpr_ctl);
	enable_irq(cpr->irq);

	return 0;
}

static int msm_cpr_suspend(struct device *dev)
{
	struct msm_cpr *cpr = dev_get_drvdata(dev);
	int osc_num = cpr->config->cpr_mode_data->ring_osc;

	cpr_save_state.rbif_timer_interval =
		cpr_read_reg(cpr, RBCPR_TIMER_INTERVAL);
	cpr_save_state.rbif_int_en =
		cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
	cpr_save_state.rbif_limit =
		cpr_read_reg(cpr, RBIF_LIMIT);
	cpr_save_state.rbif_timer_adjust =
		cpr_read_reg(cpr, RBIF_TIMER_ADJUST);
	cpr_save_state.rbcpr_gcnt_target =
		cpr_read_reg(cpr, RBCPR_GCNT_TARGET(osc_num));
	cpr_save_state.rbcpr_step_quot =
		cpr_read_reg(cpr, RBCPR_STEP_QUOT);
	cpr_save_state.rbif_sw_level =
		cpr_read_reg(cpr, RBIF_SW_VLEVEL);
	cpr_save_state.rbcpr_ctl =
		cpr_read_reg(cpr, RBCPR_CTL);

	disable_irq(cpr->irq);
	cpr_disable(cpr);

	return 0;
}

void msm_cpr_pm_resume(void)
{
	msm_cpr_resume(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_resume);

void msm_cpr_pm_suspend(void)
{
	msm_cpr_suspend(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_suspend);
#endif

void msm_cpr_disable(void)
{
	struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);

	cpr_disable(cpr);
}
EXPORT_SYMBOL(msm_cpr_disable);

void msm_cpr_enable(void)
{
	struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);

	cpr_enable(cpr);
}
EXPORT_SYMBOL(msm_cpr_enable);

static int __devinit msm_cpr_probe(struct platform_device *pdev)
{
	int res, irqn, irq_enabled;
	struct msm_cpr *cpr;
	const struct msm_cpr_config *pdata = pdev->dev.platform_data;
	void __iomem *base;
	struct resource *mem;

	if (!enable)
		return -EPERM;

	if (!pdata) {
		pr_err("CPR: Platform data is not available\n");
		return -EIO;
	}

	cpr = devm_kzalloc(&pdev->dev, sizeof(struct msm_cpr), GFP_KERNEL);
	if (!cpr)
		return -ENOMEM;

	/* Initialize platform_data */
	cpr->config = pdata;

	cpr_pdev = pdev;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem || !mem->start) {
		pr_err("CPR: get resource failed\n");
		res = -ENXIO;
		goto out;
	}

	base = ioremap_nocache(mem->start, resource_size(mem));
	if (!base) {
		pr_err("CPR: ioremap failed\n");
		res = -ENOMEM;
		goto out;
	}

	if (cpr->config->irq_line < 0) {
		pr_err("CPR: Invalid IRQ line specified\n");
		res = -ENXIO;
		goto err_ioremap;
	}
	irqn = platform_get_irq(pdev, cpr->config->irq_line);
	if (irqn < 0) {
		pr_err("CPR: Unable to get irq\n");
		res = -ENXIO;
		goto err_ioremap;
	}

	cpr->irq = irqn;

	cpr->base = base;

	cpr->vp = pdata->vp_data;

	mutex_init(&cpr->cpr_mutex);

	/* Initialize the Voltage domain for CPR */
	cpr->vreg_cx = regulator_get(&pdev->dev, "vddx_cx");
	if (IS_ERR(cpr->vreg_cx)) {
		res = PTR_ERR(cpr->vreg_cx);
		pr_err("could not get regulator: %d\n", res);
		goto err_ioremap;
	}

	/* Assume current mode is TURBO Mode */
	cpr->cpr_mode = TURBO_MODE;
	cpr->prev_mode = TURBO_MODE;

	/* Initial configuration of CPR */
	cpr_config(cpr);

	platform_set_drvdata(pdev, cpr);

	/* Initialize the Debugfs Entry for cpr */
	res = msm_cpr_debug_init(cpr->base);
	if (res) {
		pr_err("CPR: Debugfs Creation Failed\n");
		goto err_ioremap;
	}

	/* Register the interrupt handler for IRQ 0 */
	res = request_threaded_irq(irqn, NULL, cpr_irq0_handler,
				IRQF_TRIGGER_RISING, "msm-cpr-irq0", cpr);
	if (res) {
		pr_err("CPR: request irq failed for IRQ %d\n", irqn);
		goto err_ioremap;
	}

	/**
	 * Enable the requested interrupt lines.
	 * Do not enable MID_INT since we shall use
	 * the SW_AUTO_CONT_ACK_EN bit.
	 */
	irq_enabled = INT_MASK & ~MID_INT;
	cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
			INT_MASK, irq_enabled);

	/* Enable the cpr */
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

	cpr->freq_transition.notifier_call = cpr_freq_transition;
	cpufreq_register_notifier(&cpr->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);

	return res;

err_ioremap:
	iounmap(base);
out:
	return res;
}

static int __devexit msm_cpr_remove(struct platform_device *pdev)
{
	struct msm_cpr *cpr = platform_get_drvdata(pdev);

	cpufreq_unregister_notifier(&cpr->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);

	regulator_disable(cpr->vreg_cx);
	regulator_put(cpr->vreg_cx);
	free_irq(cpr->irq, cpr);
	iounmap(cpr->base);
	mutex_destroy(&cpr->cpr_mutex);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static const struct dev_pm_ops msm_cpr_dev_pm_ops = {
	.suspend = msm_cpr_suspend,
	.resume = msm_cpr_resume,
};
#endif

static struct platform_driver msm_cpr_driver = {
	.probe = msm_cpr_probe,
	.remove = __devexit_p(msm_cpr_remove),
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &msm_cpr_dev_pm_ops,
#endif
	},
};

static int __init msm_init_cpr(void)
{
	return platform_driver_register(&msm_cpr_driver);
}

module_init(msm_init_cpr);

static void __exit msm_exit_cpr(void)
{
	platform_driver_unregister(&msm_cpr_driver);
}

module_exit(msm_exit_cpr);

MODULE_DESCRIPTION("MSM CPR Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");