/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#include <mach/irqs.h>

#include "msm_cpr.h"

#define MODULE_NAME "msm-cpr"

/* Need platform device handle for suspend and resume APIs */
static struct platform_device *cpr_pdev;

struct msm_cpr {
	int curr_osc;
	int cpr_mode;
	int prev_mode;
	uint32_t floor;
	uint32_t ceiling;
	void __iomem *base;
	unsigned int irq;
	struct mutex cpr_mutex;
	struct regulator *vreg_cx;
	const struct msm_cpr_config *config;
	struct notifier_block freq_transition;
	struct msm_cpr_vp_data *vp;
};

/* Need to maintain state data for suspend and resume APIs */
static struct msm_cpr_reg cpr_save_state;

static inline
void cpr_write_reg(struct msm_cpr *cpr, u32 offset, u32 value)
{
	writel_relaxed(value, cpr->base + offset);
}

static inline u32 cpr_read_reg(struct msm_cpr *cpr, u32 offset)
{
	return readl_relaxed(cpr->base + offset);
}

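/* Read-modify-write helper: update only the register bits selected by mask. */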
static
void cpr_modify_reg(struct msm_cpr *cpr, u32 offset, u32 mask, u32 value)
{
	u32 reg_val;

	reg_val = readl_relaxed(cpr->base + offset);
	reg_val &= ~mask;
	reg_val |= value;
	writel_relaxed(reg_val, cpr->base + offset);
}

#ifdef DEBUG
static void cpr_regs_dump_all(struct msm_cpr *cpr)
{
	pr_debug("RBCPR_GCNT_TARGET(%d): 0x%x\n",
		cpr->curr_osc, readl_relaxed(cpr->base +
		RBCPR_GCNT_TARGET(cpr->curr_osc)));
	pr_debug("RBCPR_TIMER_INTERVAL: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_TIMER_INTERVAL));
	pr_debug("RBIF_TIMER_ADJUST: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_TIMER_ADJUST));
	pr_debug("RBIF_LIMIT: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_LIMIT));
	pr_debug("RBCPR_STEP_QUOT: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_STEP_QUOT));
	pr_debug("RBIF_SW_VLEVEL: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_SW_VLEVEL));
	pr_debug("RBCPR_DEBUG1: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_DEBUG1));
	pr_debug("RBCPR_RESULT_0: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_RESULT_0));
	pr_debug("RBCPR_RESULT_1: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_RESULT_1));
	pr_debug("RBCPR_QUOT_AVG: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_QUOT_AVG));
	pr_debug("RBCPR_CTL: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_CTL));
	pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
		cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
	pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
		cpr_read_reg(cpr, RBIF_IRQ_STATUS));
}
#endif

/* Enable the CPR H/W Block */
static void cpr_enable(struct msm_cpr *cpr)
{
	mutex_lock(&cpr->cpr_mutex);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
	mutex_unlock(&cpr->cpr_mutex);
}

/* Disable the CPR H/W Block */
static void cpr_disable(struct msm_cpr *cpr)
{
	mutex_lock(&cpr->cpr_mutex);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
	mutex_unlock(&cpr->cpr_mutex);
}

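/* Poll RBCPR_RESULT_0 until the BUSY bit clears (10 us interval, 1 ms timeout). */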
static int32_t cpr_poll_result(struct msm_cpr *cpr)
{
	uint32_t val = 0;
	int rc = 0;

	rc = readl_poll_timeout(cpr->base + RBCPR_RESULT_0, val, ~val & BUSY_M,
				10, 1000);
	if (rc)
		pr_info("%s: RBCPR_RESULT_0 poll timed out: %d\n",
			__func__, rc);
	return rc;
}

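/* Poll RBIF_IRQ_STATUS until the DONE flag (bit 0) is set (10 us interval, 1 ms timeout). */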
static int32_t cpr_poll_result_done(struct msm_cpr *cpr)
{
	uint32_t val = 0;
	int rc = 0;

	rc = readl_poll_timeout(cpr->base + RBIF_IRQ_STATUS, val, val & 0x1,
				10, 1000);
	if (rc)
		pr_info("%s: RBIF_IRQ_STATUS poll timed out: %d\n",
			__func__, rc);
	return rc;
}

static void
cpr_2pt_kv_analysis(struct msm_cpr *cpr, struct msm_cpr_mode *chip_data)
{
	int32_t tgt_volt_mV = 0, level_uV, rc;
	uint32_t quot1, quot2;

	/**
	 * 2 Point KV Analysis to calculate Step Quot
	 * STEP_QUOT is the number of QUOT units per PMIC step
	 * STEP_QUOT = (quot1 - quot2) / 4
	 *
	 * The step quot is calculated once for every mode and stored for
	 * later use.
	 */
	if (chip_data->step_quot != ~0)
		goto out_2pt_kv;

	/**
	 * Using the value from chip_data->tgt_volt_offset
	 * calculate the new PMIC adjusted voltages and set
	 * the PMIC to provide this value.
	 *
	 * Assuming the default voltage is the highest safe boot-up
	 * voltage, the offset is always subtracted from it.
	 */
	if (chip_data->tgt_volt_offset > 0) {
		tgt_volt_mV = chip_data->calibrated_mV -
			(chip_data->tgt_volt_offset * cpr->vp->step_size);
	}
	pr_debug("tgt_volt_mV = %d, calibrated_mV = %d\n", tgt_volt_mV,
		chip_data->calibrated_mV);

	/* level_uV = tgt_volt_mV * 1000; */
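	/*
	 * NOTE: the first measurement voltage below is hardcoded; the
	 * commented-out line above shows the intended derivation from
	 * tgt_volt_mV.
	 */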
	level_uV = 1350000;
	/* Call the PMIC specific routine to set the voltage */
	rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
	if (rc) {
		pr_err("%s: Initial voltage set at %duV failed. %d\n",
			__func__, level_uV, rc);
		return;
	}
	rc = regulator_enable(cpr->vreg_cx);
	if (rc) {
		pr_err("failed to enable %s, rc=%d\n", "vdd_cx", rc);
		return;
	}

	/* Store the adjusted value of voltage */
	chip_data->calibrated_mV = 1300;

	/* Take first CPR measurement at a higher voltage to get QUOT1 */

	/* Enable the Software mode of operation */
	cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);

	/* Enable the cpr measurement */
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

	/* IRQ is already disabled */
	rc = cpr_poll_result_done(cpr);
	if (rc) {
		pr_err("%s: Quot1: Exiting due to INT_DONE poll timeout\n",
			__func__);
		return;
	}

	rc = cpr_poll_result(cpr);
	if (rc) {
		pr_err("%s: Quot1: Exiting due to BUSY poll timeout\n",
			__func__);
		return;
	}

	quot1 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;

	/* Take second CPR measurement at a lower voltage to get QUOT2 */
	level_uV = 1300000;

	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
	/* Call the PMIC specific routine to set the voltage */
	rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
	if (rc) {
		pr_err("%s: Voltage set at %duV failed. %d\n",
			__func__, level_uV, rc);
		return;
	}

	cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

	/* cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1); */
	rc = cpr_poll_result_done(cpr);
	if (rc) {
		pr_err("%s: Quot2: Exiting due to INT_DONE poll timeout\n",
			__func__);
		goto err_poll_result_done;
	}
	/* IRQ is already disabled */
	rc = cpr_poll_result(cpr);
	if (rc) {
		pr_err("%s: Quot2: Exiting due to BUSY poll timeout\n",
			__func__);
		goto err_poll_result;
	}
	quot2 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
	chip_data->step_quot = (quot1 - quot2) / 4;
	pr_debug("%s: Calculated Step Quot is %d\n",
		__func__, chip_data->step_quot);
	/* Disable the cpr */
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);

out_2pt_kv:
	/* Program the step quot */
	cpr_write_reg(cpr, RBCPR_STEP_QUOT, (chip_data->step_quot & 0xFF));
	return;
err_poll_result:
err_poll_result_done:
	regulator_disable(cpr->vreg_cx);
}

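/*
 * The helpers below clear every pending CPR interrupt flag (the mask
 * argument is currently unused) and then either ACK or NACK the hardware
 * voltage recommendation.
 */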
static inline
void cpr_irq_clr_and_ack(struct msm_cpr *cpr, uint32_t mask)
{
	/* Clear the interrupt */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

static inline
void cpr_irq_clr_and_nack(struct msm_cpr *cpr, uint32_t mask)
{
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
	cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
}

static void cpr_irq_set(struct msm_cpr *cpr, uint32_t irq, bool enable)
{
	uint32_t irq_enabled;

	irq_enabled = cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
	if (enable)
		irq_enabled |= irq;
	else
		irq_enabled &= ~irq;
	cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
			INT_MASK, irq_enabled);
}

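/*
 * Handle an UP recommendation: raise the PMIC voltage (clamped to Vmax for
 * the current mode), re-enable DOWN interrupts and ACK the recommendation.
 */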
static void
cpr_up_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
	int rc, set_volt_mV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/**
	 * FIXME: Need to handle a potential race condition between
	 * the freq switch handler and the CPR interrupt handler here
	 */
	/* Set New PMIC voltage */
	set_volt_mV = (new_volt < chip_data->Vmax ? new_volt
				: chip_data->Vmax);
	rc = regulator_set_voltage(cpr->vreg_cx, set_volt_mV * 1000,
					set_volt_mV * 1000);
	if (rc) {
		pr_err("%s: Voltage set at %dmV failed. %d\n",
			__func__, set_volt_mV, rc);
		cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
		return;
	}
	pr_debug("%s: Voltage set at %dmV\n", __func__, set_volt_mV);

	/**
	 * Save the new calibrated voltage to be re-used
	 * whenever we return to the same mode after a mode switch.
	 */
	chip_data->calibrated_mV = set_volt_mV;

	/* Clear all the interrupts */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);

	/* Disable Auto ACK for Down interrupts */
	cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_NACK_DN_EN_M, 0);

	/*
	 * Enable down interrupts to App as they might have been disabled if
	 * CPR hit Vmin earlier. The voltage set now is above Vmin.
	 */
	cpr_irq_set(cpr, DOWN_INT, 1);

	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

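/*
 * Handle a DOWN recommendation: lower the PMIC voltage (clamped to Vmin for
 * the current mode). Once Vmin is reached, DOWN interrupts are masked until
 * the next UP event.
 */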
static void
cpr_dn_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
	int rc, set_volt_mV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/**
	 * FIXME: Need to handle a potential race condition between
	 * the freq switch handler and the CPR interrupt handler here
	 */
	/* Set New PMIC volt */
	set_volt_mV = (new_volt > chip_data->Vmin ? new_volt
				: chip_data->Vmin);
	rc = regulator_set_voltage(cpr->vreg_cx, set_volt_mV * 1000,
					set_volt_mV * 1000);
	if (rc) {
		pr_err("%s: Voltage set at %dmV failed. %d\n",
			__func__, set_volt_mV, rc);
		cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
		return;
	}
	pr_debug("%s: Voltage set at %dmV\n", __func__, set_volt_mV);

	/**
	 * Save the new calibrated voltage to be re-used
	 * whenever we return to the same mode after a mode switch.
	 */
	chip_data->calibrated_mV = set_volt_mV;

	/* Clear all the interrupts */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);

	if (new_volt <= chip_data->Vmin) {
		/*
		 * Disable the down interrupt to App after we hit Vmin.
		 * It shall be enabled after we service an up interrupt.
		 *
		 * A race condition between the freq switch handler and the
		 * CPR interrupt handler is possible. So, do not disable the
		 * interrupt if a freq switch already caused a mode change,
		 * since we need this interrupt in the new mode.
		 */
		if (cpr->cpr_mode == cpr->prev_mode) {
			/*
			 * Enable Auto ACK for CPR Down Flags
			 * while DOWN_INT to App is disabled
			 */
			cpr_modify_reg(cpr, RBCPR_CTL,
					SW_AUTO_CONT_NACK_DN_EN_M,
					SW_AUTO_CONT_NACK_DN_EN);
			cpr_irq_set(cpr, DOWN_INT, 0);
			pr_debug("%s: DOWN_INT disabled\n", __func__);
		}
	}
	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

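/*
 * Translate the error step reported in RBCPR_RESULT_0 into a new target
 * voltage and dispatch to the UP or DOWN event handler.
 */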
static void cpr_set_vdd(struct msm_cpr *cpr, enum cpr_action action)
{
	uint32_t curr_volt, new_volt, error_step;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
	error_step = cpr_read_reg(cpr, RBCPR_RESULT_0) >> 2;
	error_step &= 0xF;
	curr_volt = chip_data->calibrated_mV;

	if (action == UP) {
		/**
		 * Using the up margin in the comparison helps avoid having to
		 * change the up threshold values in the chip register.
		 */
		if (error_step < (cpr->config->up_threshold +
					cpr->config->up_margin)) {
			/* FIXME: Avoid repeated up interrupts if we are here */
			pr_debug("UP_INT error_step too small to set\n");
			cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
			return;
		}

		/* Calculate the new PMIC voltage */
		new_volt = curr_volt + (error_step * cpr->vp->step_size);
		pr_debug("UP_INT: new_volt: %d\n", new_volt);
		cpr_up_event_handler(cpr, new_volt);

	} else if (action == DOWN) {
		/**
		 * Using the down margin in the comparison helps avoid having
		 * to change the down threshold values in the chip register.
		 */
		if (error_step < (cpr->config->dn_threshold +
					cpr->config->dn_margin)) {
			/* FIXME: Avoid repeated dn interrupts if we are here */
			pr_debug("DOWN_INT error_step too small to set\n");
			cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
			return;
		}

		/* Calculate the new PMIC voltage */
		new_volt = curr_volt - (error_step * cpr->vp->step_size);
		pr_debug("DOWN_INT: new_volt: %d\n", new_volt);
		cpr_dn_event_handler(cpr, new_volt);
	}
}

static irqreturn_t cpr_irq0_handler(int irq, void *dev_id)
{
	struct msm_cpr *cpr = dev_id;
	uint32_t reg_val, ctl_reg;

	reg_val = cpr_read_reg(cpr, RBIF_IRQ_STATUS);
	ctl_reg = cpr_read_reg(cpr, RBCPR_CTL);

	/* The following sequence of handling is as per each IRQ's priority */
	if (reg_val & BIT(4)) {
		pr_debug("CPR: IRQ %d occurred for UP Flag\n", irq);
		cpr_set_vdd(cpr, UP);

	} else if ((reg_val & BIT(2)) && !(ctl_reg & SW_AUTO_CONT_NACK_DN_EN)) {
		pr_debug("CPR: IRQ %d occurred for DOWN Flag\n", irq);
		cpr_set_vdd(cpr, DOWN);

	} else if (reg_val & BIT(1)) {
		pr_debug("CPR: IRQ %d occurred for MIN Flag\n", irq);
		cpr_irq_clr_and_nack(cpr, BIT(1) | BIT(0));

	} else if (reg_val & BIT(5)) {
		pr_debug("CPR: IRQ %d occurred for MAX Flag\n", irq);
		cpr_irq_clr_and_nack(cpr, BIT(5) | BIT(0));

	} else if (reg_val & BIT(3)) {
		/* SW_AUTO_CONT_ACK_EN is enabled */
		pr_debug("CPR: IRQ %d occurred for MID Flag\n", irq);
	}
	return IRQ_HANDLED;
}

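/*
 * One-time configuration of the CPR block for the current mode: program the
 * SW vlevel, voltage limits, up/down thresholds and ring oscillator targets,
 * run the 2-point KV analysis for the step quot, set the starting voltage
 * and start the measurement timer.
 */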
static void cpr_config(struct msm_cpr *cpr)
{
	uint32_t delay_count, cnt = 0, rc, tmp_uV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/* Program the SW vlevel */
	cpr_modify_reg(cpr, RBIF_SW_VLEVEL, SW_VLEVEL_M,
			cpr->config->sw_vlevel);

	/* Set the floor and ceiling values */
	cpr->floor = cpr->config->floor;
	cpr->ceiling = cpr->config->ceiling;

	/* Program the Ceiling & Floor values */
	cpr_modify_reg(cpr, RBIF_LIMIT, (CEILING_M | FLOOR_M),
			((cpr->ceiling << 6) | cpr->floor));

	/* Program the Up and Down Threshold values */
	cpr_modify_reg(cpr, RBCPR_CTL, UP_THRESHOLD_M | DN_THRESHOLD_M,
			cpr->config->up_threshold << 24 |
			cpr->config->dn_threshold << 28);

	cpr->curr_osc = chip_data->ring_osc;

	/**
	 * Program the gate count and target values
	 * for all the ring oscillators
	 */
	while (cnt < NUM_OSC) {
		cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cnt),
				(GCNT_M | TARGET_M),
				(chip_data->ring_osc_data[cnt].gcnt << 12 |
				chip_data->ring_osc_data[cnt].target_count));
		pr_debug("RBCPR_GCNT_TARGET(%d): 0x%x\n", cnt,
			readl_relaxed(cpr->base + RBCPR_GCNT_TARGET(cnt)));
		cnt++;
	}

	/* Configure the step quot */
	cpr_2pt_kv_analysis(cpr, chip_data);

	/**
	 * Call the PMIC specific routine to set the voltage.
	 * Set it one extra step higher since that helps as per
	 * characterization data.
	 */
	chip_data->calibrated_mV += cpr->vp->step_size;
	tmp_uV = chip_data->calibrated_mV * 1000;
	rc = regulator_set_voltage(cpr->vreg_cx, tmp_uV, tmp_uV);
	if (rc)
		pr_err("%s: Voltage set failed %d\n", __func__, rc);

	/* Program the Timer for the default delay between CPR measurements */
	delay_count = 0xFFFF;
	cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL, delay_count);

	/* Enable the Timer */
	cpr_modify_reg(cpr, RBCPR_CTL, TIMER_M, ENABLE_TIMER);

	/* Enable Auto ACK for Mid interrupts */
	cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_ACK_EN_M,
			SW_AUTO_CONT_ACK_EN);
}

static void cpr_mode_config(struct msm_cpr *cpr, enum cpr_mode mode)
{
	if (cpr->cpr_mode == mode)
		return;

	cpr->cpr_mode = mode;
	pr_debug("%s: Switching to %s mode\n", __func__,
		(mode == TURBO_MODE ? "TURBO" : "NORMAL"));

	/* Configure the new mode */
	cpr_config(cpr);
}

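/*
 * cpufreq transition notifier: CPR is disabled across a frequency switch and
 * then reconfigured for TURBO or NORMAL mode based on the new frequency.
 */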
static int
cpr_freq_transition(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct msm_cpr *cpr = container_of(nb, struct msm_cpr, freq_transition);
	struct cpufreq_freqs *freqs = data;

	switch (val) {
	case CPUFREQ_PRECHANGE:
		pr_debug("pre freq change notification to cpr\n");

		disable_irq(cpr->irq);
		cpr_disable(cpr);
		cpr->prev_mode = cpr->cpr_mode;
		break;
	case CPUFREQ_POSTCHANGE:
		pr_debug("post freq change notification to cpr\n");

		if (freqs->new >= cpr->config->nom_freq_limit)
			cpr_mode_config(cpr, TURBO_MODE);
		else
			cpr_mode_config(cpr, NORMAL_MODE);
		/**
		 * Enable all interrupts. One of them could be in a disabled
		 * state if vdd had hit Vmax / Vmin earlier.
		 */
		cpr_irq_set(cpr, (UP_INT | DOWN_INT), 1);

		enable_irq(cpr->irq);

		cpr_enable(cpr);

		break;
	default:
		break;
	}
	return 0;
}

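/*
 * Suspend/resume support: the relevant CPR registers are saved on suspend
 * and restored on resume before the loop is re-enabled.
 */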
#ifdef CONFIG_PM
static int msm_cpr_resume(struct device *dev)
{
	struct msm_cpr *cpr = dev_get_drvdata(dev);
	int osc_num = cpr->config->cpr_mode_data->ring_osc;

	cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL,
		cpr_save_state.rbif_timer_interval);
	cpr_write_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
		cpr_save_state.rbif_int_en);
	cpr_write_reg(cpr, RBIF_LIMIT,
		cpr_save_state.rbif_limit);
	cpr_write_reg(cpr, RBIF_TIMER_ADJUST,
		cpr_save_state.rbif_timer_adjust);
	cpr_write_reg(cpr, RBCPR_GCNT_TARGET(osc_num),
		cpr_save_state.rbcpr_gcnt_target);
	cpr_write_reg(cpr, RBCPR_STEP_QUOT,
		cpr_save_state.rbcpr_step_quot);
	cpr_write_reg(cpr, RBIF_SW_VLEVEL,
		cpr_save_state.rbif_sw_level);

	cpr_enable(cpr);
	cpr_write_reg(cpr, RBCPR_CTL,
		cpr_save_state.rbcpr_ctl);
	enable_irq(cpr->irq);

	return 0;
}

static int msm_cpr_suspend(struct device *dev)
{
	struct msm_cpr *cpr = dev_get_drvdata(dev);
	int osc_num = cpr->config->cpr_mode_data->ring_osc;

	cpr_save_state.rbif_timer_interval =
		cpr_read_reg(cpr, RBCPR_TIMER_INTERVAL);
	cpr_save_state.rbif_int_en =
		cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
	cpr_save_state.rbif_limit =
		cpr_read_reg(cpr, RBIF_LIMIT);
	cpr_save_state.rbif_timer_adjust =
		cpr_read_reg(cpr, RBIF_TIMER_ADJUST);
	cpr_save_state.rbcpr_gcnt_target =
		cpr_read_reg(cpr, RBCPR_GCNT_TARGET(osc_num));
	cpr_save_state.rbcpr_step_quot =
		cpr_read_reg(cpr, RBCPR_STEP_QUOT);
	cpr_save_state.rbif_sw_level =
		cpr_read_reg(cpr, RBIF_SW_VLEVEL);
	cpr_save_state.rbcpr_ctl =
		cpr_read_reg(cpr, RBCPR_CTL);

	disable_irq(cpr->irq);
	cpr_disable(cpr);

	return 0;
}

void msm_cpr_pm_resume(void)
{
	msm_cpr_resume(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_resume);

void msm_cpr_pm_suspend(void)
{
	msm_cpr_suspend(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_suspend);
#endif

void msm_cpr_disable(void)
{
	struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);

	cpr_disable(cpr);
}
EXPORT_SYMBOL(msm_cpr_disable);

void msm_cpr_enable(void)
{
	struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);

	cpr_enable(cpr);
}
EXPORT_SYMBOL(msm_cpr_enable);

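/*
 * Probe: map the CPR block, get the core rail regulator, run the initial
 * calibration/configuration, then hook up the CPR interrupt and the cpufreq
 * transition notifier.
 */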
static int __devinit msm_cpr_probe(struct platform_device *pdev)
{
	int res, irqn, irq_enabled;
	struct msm_cpr *cpr;
	const struct msm_cpr_config *pdata = pdev->dev.platform_data;
	void __iomem *base;
	struct resource *mem;

	if (!pdata) {
		pr_err("CPR: Platform data is not available\n");
		return -EIO;
	}

	cpr = devm_kzalloc(&pdev->dev, sizeof(struct msm_cpr), GFP_KERNEL);
	if (!cpr)
		return -ENOMEM;

	/* Initialize platform_data */
	cpr->config = pdata;

	cpr_pdev = pdev;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem || !mem->start) {
		pr_err("CPR: get resource failed\n");
		res = -ENXIO;
		goto out;
	}

	base = ioremap_nocache(mem->start, resource_size(mem));
	if (!base) {
		pr_err("CPR: ioremap failed\n");
		res = -ENOMEM;
		goto out;
	}

	if (cpr->config->irq_line < 0) {
		pr_err("CPR: Invalid IRQ line specified\n");
		res = -ENXIO;
		goto err_ioremap;
	}
	irqn = platform_get_irq(pdev, cpr->config->irq_line);
	if (irqn < 0) {
		pr_err("CPR: Unable to get irq\n");
		res = -ENXIO;
		goto err_ioremap;
	}

	cpr->irq = irqn;

	cpr->base = base;

	cpr->vp = pdata->vp_data;

	mutex_init(&cpr->cpr_mutex);

	/* Initialize the Voltage domain for CPR */
	cpr->vreg_cx = regulator_get(&pdev->dev, "vddx_cx");
	if (IS_ERR(cpr->vreg_cx)) {
		res = PTR_ERR(cpr->vreg_cx);
		pr_err("could not get regulator: %d\n", res);
		goto err_ioremap;
	}

	/* Assume current mode is TURBO Mode */
	cpr->cpr_mode = TURBO_MODE;
	cpr->prev_mode = TURBO_MODE;

	/* Initial configuration of CPR */
	cpr_config(cpr);

	platform_set_drvdata(pdev, cpr);

	/* Initialize the debugfs entry for cpr */
	res = msm_cpr_debug_init(cpr->base);
	if (res) {
		pr_err("CPR: Debugfs Creation Failed\n");
		goto err_reg_put;
	}

	/* Register the interrupt handler for IRQ 0 */
	res = request_threaded_irq(irqn, NULL, cpr_irq0_handler,
			IRQF_TRIGGER_RISING, "msm-cpr-irq0", cpr);
	if (res) {
		pr_err("CPR: request irq failed for IRQ %d\n", irqn);
		goto err_reg_put;
	}

	/**
	 * Enable the requested interrupt lines.
	 * Do not enable MID_INT since we shall use
	 * the SW_AUTO_CONT_ACK_EN bit.
	 */
	irq_enabled = INT_MASK & ~MID_INT;
	cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
			INT_MASK, irq_enabled);

	/* Enable the cpr */
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

	cpr->freq_transition.notifier_call = cpr_freq_transition;
	cpufreq_register_notifier(&cpr->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);

	return res;

err_reg_put:
	regulator_put(cpr->vreg_cx);
err_ioremap:
	iounmap(base);
out:
	return res;
}

static int __devexit msm_cpr_remove(struct platform_device *pdev)
{
	struct msm_cpr *cpr = platform_get_drvdata(pdev);

	cpufreq_unregister_notifier(&cpr->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);

	regulator_disable(cpr->vreg_cx);
	regulator_put(cpr->vreg_cx);
	free_irq(cpr->irq, cpr);
	iounmap(cpr->base);
	mutex_destroy(&cpr->cpr_mutex);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
static const struct dev_pm_ops msm_cpr_dev_pm_ops = {
	.suspend = msm_cpr_suspend,
	.resume = msm_cpr_resume,
};
#endif

static struct platform_driver msm_cpr_driver = {
	.probe = msm_cpr_probe,
	.remove = __devexit_p(msm_cpr_remove),
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &msm_cpr_dev_pm_ops,
#endif
	},
};

static int __init msm_init_cpr(void)
{
	return platform_driver_register(&msm_cpr_driver);
}

module_init(msm_init_cpr);

static void __exit msm_exit_cpr(void)
{
	platform_driver_unregister(&msm_cpr_driver);
}

module_exit(msm_exit_cpr);

MODULE_DESCRIPTION("MSM CPR Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");