/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

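/* Prefix all pr_*() log messages with the name of the calling function. */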
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>
#include <mach/clk.h>
#include <mach/scm-io.h>

#include "clock.h"
#include "clock-local.h"

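/*
 * On targets where the clock registers are not directly accessible from
 * this processor, reroute the relaxed MMIO accessors through the SCM
 * secure read/write helpers from <mach/scm-io.h>.
 */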
#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

DEFINE_SPINLOCK(local_clock_reg_lock);
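/*
 * Placeholder assigned to an RCG's current_freq until a rate is first
 * set; __rcg_clk_enable_reg() warns if a clock is enabled while still
 * pointing here.
 */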
struct clk_freq_tbl rcg_dummy_freq = F_END;

/*
 * Common Set-Rate Functions
 */

/* For clocks with MND dividers. */
void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, ctl_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl_relaxed(clk->ns_reg);
	ns_reg_val |= BIT(7);
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, clk->md_reg);

	/* If the clock has a separate CC register, program it. */
	if (clk->ns_reg != clk->b.ctl_reg) {
		ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
		ctl_reg_val &= ~(clk->ctl_mask);
		ctl_reg_val |= nf->ctl_val;
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~BIT(7);
	writel_relaxed(ns_reg_val, clk->ns_reg);
}

void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	/*
	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
	 * in NS registers are applied in the enable path, since power can be
	 * saved by leaving an un-clocked or slowly-clocked source selected
	 * until the clock is enabled.
	 */
}

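/*
 * Variant of set_rate_mnd() for blocks whose MND reset bit lives at
 * bit 8 of the CC register rather than bit 7 of the NS register.
 */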
void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	uint32_t ctl_reg_val;

	/* Assert MND reset. */
	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
	ctl_reg_val |= BIT(8);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, clk->md_reg);

	/* Program MN counter Enable and Mode. */
	ctl_reg_val &= ~(clk->ctl_mask);
	ctl_reg_val |= nf->ctl_val;
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Deassert MND reset. */
	ctl_reg_val &= ~BIT(8);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
}

void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = clk->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, ctl_reg_val;
	uint32_t bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
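	/*
	 * The XOR flips the selection when the clock is off, so that
	 * new_bank_masks below resolves to the currently active bank;
	 * when the clock is running it resolves to the inactive bank,
	 * which is reprogrammed and then switched to.
	 */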
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	ns_reg_val = readl_relaxed(clk->ns_reg);

	/* Assert bank MND reset. */
	ns_reg_val |= new_bank_masks->rst_mask;
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (clk->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	writel_relaxed(nf->md_val, new_bank_masks->md_reg);

	/* Enable counter only if clock is enabled. */
	if (clk->enabled)
		ctl_reg_val |= new_bank_masks->mnd_en_mask;
	else
		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);

	ctl_reg_val &= ~(new_bank_masks->mode_mask);
	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Deassert bank MND reset. */
	ns_reg_val &= ~(new_bank_masks->rst_mask);
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (clk->enabled && clk->current_freq->freq_hz) {
		ctl_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
		/*
		 * Wait at least 6 cycles of the slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Disable old bank's MN counter. */
		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/*
	 * If this freq requires the MN counter to be enabled,
	 * update the enable mask to match the current bank.
	 */
	if (nf->mnd_en_mask)
		nf->mnd_en_mask = new_bank_masks->mnd_en_mask;
	/* Update the NS mask to match the current bank. */
	clk->ns_mask = new_bank_masks->ns_mask;
}

void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = clk->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ns_reg_val = readl_relaxed(clk->ns_reg);
	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
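	/* Same bank-selection trick as in set_rate_mnd_banked() above. */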
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (clk->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (clk->enabled && clk->current_freq->freq_hz) {
		ns_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ns_reg_val, clk->ns_reg);
		/*
		 * Wait at least 6 cycles of the slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/* Update the NS mask to match the current bank. */
	clk->ns_mask = new_bank_masks->ns_mask;
}

/*
 * Clock enable/disable functions
 */

/* Return non-zero if the clock's halt status register shows it is halted. */
static int branch_clk_is_halted(const struct branch *clk)
{
	int invert = (clk->halt_check == ENABLE);
	int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
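	/*
	 * ENABLE-type status bits read 1 while the clock is running, so
	 * invert the raw bit to mean "halted".
	 */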
	return invert ? !status_bit : status_bit;
}

static void __branch_clk_enable_reg(const struct branch *clk, const char *name)
{
	u32 reg_val;

	if (clk->en_mask) {
		reg_val = readl_relaxed(clk->ctl_reg);
		reg_val |= clk->en_mask;
		writel_relaxed(reg_val, clk->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch enable.
	 */
	mb();

	/* Wait for clock to enable before returning. */
	if (clk->halt_check == DELAY)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT
			|| clk->halt_check == ENABLE_VOTED
			|| clk->halt_check == HALT_VOTED) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'off'", name);
	}
}

/* Perform any register operations required to enable the clock. */
static void __rcg_clk_enable_reg(struct rcg_clk *clk)
{
	u32 reg_val;
	void __iomem *const reg = clk->b.ctl_reg;

	WARN(clk->current_freq == &rcg_dummy_freq,
		"Attempting to enable %s before setting its rate. "
		"Set the rate first!\n", clk->c.dbg_name);

	/*
	 * Program the NS register, if applicable. NS registers are not
	 * set in the set_rate path because power can be saved by deferring
	 * the selection of a clocked source until the clock is enabled.
	 */
	if (clk->ns_mask) {
		reg_val = readl_relaxed(clk->ns_reg);
		reg_val &= ~(clk->ns_mask);
		reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
		writel_relaxed(reg_val, clk->ns_reg);
	}

	/* Enable MN counter, if applicable. */
	reg_val = readl_relaxed(reg);
	if (clk->current_freq->mnd_en_mask) {
		reg_val |= clk->current_freq->mnd_en_mask;
		writel_relaxed(reg_val, reg);
	}
	/* Enable root. */
	if (clk->root_en_mask) {
		reg_val |= clk->root_en_mask;
		writel_relaxed(reg_val, reg);
	}
	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
}

/* Perform any register operations required to disable the branch. */
static u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
{
	u32 reg_val;

	reg_val = readl_relaxed(clk->ctl_reg);
	if (clk->en_mask) {
		reg_val &= ~(clk->en_mask);
		writel_relaxed(reg_val, clk->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	/* Wait for clock to disable before continuing. */
	if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
				    || clk->halt_check == HALT_VOTED)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'on'", name);
	}

	return reg_val;
}

/* Perform any register operations required to disable the generator. */
static void __rcg_clk_disable_reg(struct rcg_clk *clk)
{
	void __iomem *const reg = clk->b.ctl_reg;
	uint32_t reg_val;

	reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
	/* Disable root. */
	if (clk->root_en_mask) {
		reg_val &= ~(clk->root_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (clk->current_freq->mnd_en_mask) {
		reg_val &= ~(clk->current_freq->mnd_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/*
	 * Program NS register to low-power value with an un-clocked or
	 * slowly-clocked source selected.
	 */
	if (clk->ns_mask) {
		reg_val = readl_relaxed(clk->ns_reg);
		reg_val &= ~(clk->ns_mask);
		reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
		writel_relaxed(reg_val, clk->ns_reg);
	}
}

/* Enable a rate-settable clock. */
int rcg_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *clk = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_enable_reg(clk);
	clk->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

/* Disable a rate-settable clock. */
void rcg_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *clk = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_disable_reg(clk);
	clk->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/* Turn off a clock at boot, without checking refcounts. */
void rcg_clk_auto_off(struct clk *c)
{
	rcg_clk_disable(c);
}

/*
 * Frequency-related functions
 */

/* Set a clock's frequency. */
static int _rcg_clk_set_rate(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct clk_freq_tbl *cf;
	int rc = 0;
	struct clk *chld;

	/* Check whether the frequency is actually changing. */
	cf = clk->current_freq;
	if (nf == cf)
		return 0;

	if (clk->enabled) {
		/* Enable source clock dependency for the new freq. */
		rc = clk_enable(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock(&local_clock_reg_lock);

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (!clk->bank_info) {
		/* Disable all branches to prevent glitches. */
		list_for_each_entry(chld, &clk->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			/*
			 * We don't need to grab the child's lock because
			 * we hold local_clock_reg_lock and 'enabled' is
			 * only modified while that lock is held.
			 */
			if (x->enabled)
				__branch_clk_disable_reg(&x->b, x->c.dbg_name);
		}
		if (clk->enabled)
			__rcg_clk_disable_reg(clk);
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!clk->set_rate);
	clk->set_rate(clk, nf);

	/*
	 * Current freq must be updated before __rcg_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	clk->current_freq = nf;

	/* Enable any clocks that were disabled. */
	if (!clk->bank_info) {
		if (clk->enabled)
			__rcg_clk_enable_reg(clk);
		/* Enable only branches that were ON before. */
		list_for_each_entry(chld, &clk->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			if (x->enabled)
				__branch_clk_enable_reg(&x->b, x->c.dbg_name);
		}
	}

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (clk->enabled)
		clk_disable(cf->src_clk);

	return rc;
}

/* Set a clock to an exact rate. */
int rcg_clk_set_rate(struct clk *c, unsigned rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *nf;

	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	return _rcg_clk_set_rate(clk, nf);
}

/* Set a clock to a rate greater than some minimum. */
int rcg_clk_set_min_rate(struct clk *c, unsigned rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *nf;

	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz < rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	return _rcg_clk_set_rate(clk, nf);
}

/* Get the currently-set rate of a clock in Hz. */
unsigned rcg_clk_get_rate(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	unsigned long flags;
	unsigned ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ret = clk->current_freq->freq_hz;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/*
	 * Return 0 if the rate has never been set. Might not be correct,
	 * but it's good enough.
	 */
	if (ret == FREQ_END)
		ret = 0;

	return ret;
}

/* Check if a clock is currently enabled. */
int rcg_clk_is_enabled(struct clk *clk)
{
	return to_rcg_clk(clk)->enabled;
}

/* Return a supported rate that's at least the specified rate. */
long rcg_clk_round_rate(struct clk *c, unsigned rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

bool local_clk_is_local(struct clk *clk)
{
	return true;
}

/* Return the nth supported frequency for a given clock. */
int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *clk = to_rcg_clk(c);

	if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (clk->freq_tbl + n)->freq_hz;
}

struct clk *rcg_clk_get_parent(struct clk *clk)
{
	return to_rcg_clk(clk)->current_freq->src_clk;
}

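/*
 * At boot, reconcile the software rate with whatever the hardware is
 * already running at (e.g. as left by the bootloader). Returns 1 if
 * the clock is on and its rate was matched in the frequency table,
 * 0 otherwise.
 */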
int rcg_clk_handoff(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	uint32_t ctl_val, ns_val, md_val, ns_mask;
	struct clk_freq_tbl *freq;

	ctl_val = readl_relaxed(clk->b.ctl_reg);
	if (!(ctl_val & clk->root_en_mask))
		return 0;

	if (clk->bank_info) {
		const struct bank_masks *bank_masks = clk->bank_info;
		const struct bank_mask_info *bank_info;
		if (!(ctl_val & bank_masks->bank_sel_mask))
			bank_info = &bank_masks->bank0_mask;
		else
			bank_info = &bank_masks->bank1_mask;

		ns_mask = bank_info->ns_mask;
		md_val = readl_relaxed(bank_info->md_reg);
	} else {
		ns_mask = clk->ns_mask;
		md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
	}

	ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
	for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if ((freq->ns_val & ns_mask) == ns_val &&
		    (freq->mnd_en_mask || freq->md_val == md_val)) {
			pr_info("%s rate=%d\n", clk->c.dbg_name, freq->freq_hz);
			break;
		}
	}
	if (freq->freq_hz == FREQ_END)
		return 0;

	clk->current_freq = freq;

	return 1;
}

static int pll_vote_clk_enable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(pll->en_reg);
	ena |= pll->en_mask;
	writel_relaxed(ena, pll->en_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait until PLL is enabled */
	while ((readl_relaxed(pll->status_reg) & BIT(16)) == 0)
		cpu_relax();

	return 0;
}

static void pll_vote_clk_disable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(pll->en_reg);
	ena &= ~(pll->en_mask);
	writel_relaxed(ena, pll->en_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static unsigned pll_vote_clk_get_rate(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->rate;
}

static struct clk *pll_vote_clk_get_parent(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->parent;
}

static int pll_vote_clk_is_enabled(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return !!(readl_relaxed(pll->status_reg) & BIT(16));
}

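/*
 * Ops for PLLs shared among multiple voters: enable sets this master's
 * vote bit and polls the status register until the PLL is running,
 * while disable only clears the vote, since another voter may still be
 * keeping the PLL on.
 */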
struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_rate = pll_vote_clk_get_rate,
	.get_parent = pll_vote_clk_get_parent,
	.is_local = local_clk_is_local,
};

static int pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	/* Disable PLL bypass mode. */
	mode |= BIT(1);
	writel_relaxed(mode, pll->mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= BIT(2);
	writel_relaxed(mode, pll->mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= BIT(0);
	writel_relaxed(mode, pll->mode_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return 0;
}

static void pll_clk_disable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	mode &= ~BM(3, 0);
	writel_relaxed(mode, pll->mode_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static unsigned pll_clk_get_rate(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->rate;
}

static struct clk *pll_clk_get_parent(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->parent;
}

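/*
 * Enable sequence for SR PLLs. Note the ordering difference from
 * pll_clk_enable(): here the reset is de-asserted before the bypass
 * is disabled, and the lock wait is longer.
 */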
int sr_pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	/* De-assert active-low PLL reset. */
	mode |= BIT(2);
	writel_relaxed(mode, pll->mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= BIT(1);
	writel_relaxed(mode, pll->mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= BIT(0);
	writel_relaxed(mode, pll->mode_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return 0;
}

struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.get_rate = pll_clk_get_rate,
	.get_parent = pll_clk_get_parent,
	.is_local = local_clk_is_local,
};

struct clk_ops clk_ops_gnd = {
	.get_rate = fixed_clk_get_rate,
	.is_local = local_clk_is_local,
};

struct fixed_clk gnd_clk = {
	.c = {
		.dbg_name = "ground_clk",
		.ops = &clk_ops_gnd,
		CLK_INIT(gnd_clk.c),
	},
};

struct clk_ops clk_ops_measure = {
	.is_local = local_clk_is_local,
};

int branch_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
	branch->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

void branch_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
	branch->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

struct clk *branch_clk_get_parent(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	return branch->parent;
}

int branch_clk_set_parent(struct clk *clk, struct clk *parent)
{
	/*
	 * We set up the parent pointer at init time in msm_clock_init().
	 * This check is to make sure drivers can't change the parent.
	 */
	if (parent && list_empty(&clk->siblings)) {
		list_add(&clk->siblings, &parent->children);
		return 0;
	}
	return -EINVAL;
}

int branch_clk_is_enabled(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	return branch->enabled;
}

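/*
 * Turn off a branch at boot, without checking refcounts or updating
 * the 'enabled' flag.
 */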
void branch_clk_auto_off(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

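/* Assert or de-assert a branch's reset signal via its reset register. */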
int branch_reset(struct branch *clk, enum clk_reset_action action)
{
	int ret = 0;
	u32 reg_val;
	unsigned long flags;

	if (!clk->reset_reg)
		return -EPERM;

	spin_lock_irqsave(&local_clock_reg_lock, flags);

	reg_val = readl_relaxed(clk->reset_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= clk->reset_mask;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~(clk->reset_mask);
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, clk->reset_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return branch_reset(&to_branch_clk(clk)->b, action);
}