/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>
#include <mach/clk.h>
#include <mach/scm-io.h>

#include "clock.h"
#include "clock-local.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10
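
/*
 * Worst case, a halt-checked enable or disable below therefore busy-waits
 * HALT_CHECK_MAX_LOOPS * 1 us = ~200 us before warning that the status
 * bit is stuck.
 */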

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

/*
 * Common Set-Rate Functions
 */

/* For clocks with MND dividers. */
void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, ctl_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl_relaxed(clk->ns_reg);
	ns_reg_val |= BIT(7);
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, clk->md_reg);

	/* If the clock has a separate CC register, program it. */
	if (clk->ns_reg != clk->b.ctl_reg) {
		ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
		ctl_reg_val &= ~(clk->ctl_mask);
		ctl_reg_val |= nf->ctl_val;
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~BIT(7);
	writel_relaxed(ns_reg_val, clk->ns_reg);
}
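
/*
 * Illustrative sketch (not part of this driver): an MND counter divides
 * its source as rate = parent_rate * M / N. Assuming a hypothetical
 * layout with M in bits [15:8] and the inverted (N - M) value in bits
 * [7:0], a frequency-table entry could pack md_val like this:
 *
 *	#define MD_VAL(m, n)	(((m) << 8) | (~((n) - (m)) & 0xff))
 *
 * The real encodings come from the SoC-specific clock tables that
 * populate struct clk_freq_tbl, not from this file.
 */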

void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	/*
	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
	 * in NS registers are applied in the enable path, since power can be
	 * saved by leaving an un-clocked or slowly-clocked source selected
	 * until the clock is enabled.
	 */
}

void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	uint32_t ctl_reg_val;

	/* Assert MND reset. */
	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
	ctl_reg_val |= BIT(8);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, clk->md_reg);

	/* Program MN counter Enable and Mode. */
	ctl_reg_val &= ~(clk->ctl_mask);
	ctl_reg_val |= nf->ctl_val;
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Deassert MND reset. */
	ctl_reg_val &= ~BIT(8);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
}

void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = clk->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, ctl_reg_val;
	uint32_t bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	ns_reg_val = readl_relaxed(clk->ns_reg);

	/* Assert bank MND reset. */
	ns_reg_val |= new_bank_masks->rst_mask;
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (clk->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	writel_relaxed(nf->md_val, new_bank_masks->md_reg);

	/* Enable counter only if clock is enabled. */
	if (clk->enabled)
		ctl_reg_val |= new_bank_masks->mnd_en_mask;
	else
		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);

	ctl_reg_val &= ~(new_bank_masks->mode_mask);
	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Deassert bank MND reset. */
	ns_reg_val &= ~(new_bank_masks->rst_mask);
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (clk->enabled && clk->current_freq->freq_hz) {
		ctl_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Disable old bank's MN counter. */
		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/*
	 * If this freq requires the MN counter to be enabled,
	 * update the enable mask to match the current bank.
	 */
	if (nf->mnd_en_mask)
		nf->mnd_en_mask = new_bank_masks->mnd_en_mask;
	/* Update the NS mask to match the current bank. */
	clk->ns_mask = new_bank_masks->ns_mask;
}

void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = clk->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ns_reg_val = readl_relaxed(clk->ns_reg);
	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (clk->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (clk->enabled && clk->current_freq->freq_hz) {
		ns_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ns_reg_val, clk->ns_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/* Update the NS mask to match the current bank. */
	clk->ns_mask = new_bank_masks->ns_mask;
}

/*
 * Clock enable/disable functions
 */

/* Return non-zero if a clock's status register shows the clock is halted. */
static int branch_clk_is_halted(const struct branch *clk)
{
	int invert = (clk->halt_check == ENABLE);
	int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
	return invert ? !status_bit : status_bit;
}

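/*
 * Polarity note (a reading of the code above, not new behavior): for a
 * HALT-type clock the status bit reads 1 while the clock is halted,
 * whereas for an ENABLE-type clock the same bit reads 1 while the clock
 * is running, hence the inversion.
 */
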
int branch_in_hwcg_mode(const struct branch *b)
{
	if (!b->hwcg_mask)
		return 0;

	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}

void __branch_clk_enable_reg(const struct branch *clk, const char *name)
{
	u32 reg_val;

	if (clk->en_mask) {
		reg_val = readl_relaxed(clk->ctl_reg);
		reg_val |= clk->en_mask;
		writel_relaxed(reg_val, clk->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch enable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(clk))
		return;

	/* Wait for clock to enable before returning. */
	if (clk->halt_check == DELAY)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT
			|| clk->halt_check == ENABLE_VOTED
			|| clk->halt_check == HALT_VOTED) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'off'", name);
	}
}

/* Perform any register operations required to enable the clock. */
static void __rcg_clk_enable_reg(struct rcg_clk *clk)
{
	u32 reg_val;
	void __iomem *const reg = clk->b.ctl_reg;

	WARN(clk->current_freq == &rcg_dummy_freq,
		"Attempting to enable %s before setting its rate. "
		"Set the rate first!\n", clk->c.dbg_name);

	/*
	 * Program the NS register, if applicable. NS registers are not
	 * set in the set_rate path because power can be saved by deferring
	 * the selection of a clocked source until the clock is enabled.
	 */
	if (clk->ns_mask) {
		reg_val = readl_relaxed(clk->ns_reg);
		reg_val &= ~(clk->ns_mask);
		reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
		writel_relaxed(reg_val, clk->ns_reg);
	}

	/* Enable MN counter, if applicable. */
	reg_val = readl_relaxed(reg);
	if (clk->current_freq->mnd_en_mask) {
		reg_val |= clk->current_freq->mnd_en_mask;
		writel_relaxed(reg_val, reg);
	}
	/* Enable root. */
	if (clk->root_en_mask) {
		reg_val |= clk->root_en_mask;
		writel_relaxed(reg_val, reg);
	}
	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
}

/* Perform any register operations required to disable the branch. */
u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
{
	u32 reg_val;

	reg_val = readl_relaxed(clk->ctl_reg);
	if (clk->en_mask) {
		reg_val &= ~(clk->en_mask);
		writel_relaxed(reg_val, clk->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(clk))
		return reg_val;

	/* Wait for clock to disable before continuing. */
	if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
			|| clk->halt_check == HALT_VOTED)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'on'", name);
	}

	return reg_val;
}

/* Perform any register operations required to disable the generator. */
static void __rcg_clk_disable_reg(struct rcg_clk *clk)
{
	void __iomem *const reg = clk->b.ctl_reg;
	uint32_t reg_val;

	reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
	/* Disable root. */
	if (clk->root_en_mask) {
		reg_val &= ~(clk->root_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (clk->current_freq->mnd_en_mask) {
		reg_val &= ~(clk->current_freq->mnd_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/*
	 * Program NS register to low-power value with an un-clocked or
	 * slowly-clocked source selected.
	 */
	if (clk->ns_mask) {
		reg_val = readl_relaxed(clk->ns_reg);
		reg_val &= ~(clk->ns_mask);
		reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
		writel_relaxed(reg_val, clk->ns_reg);
	}
}

/* Enable a rate-settable clock. */
int rcg_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *clk = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_enable_reg(clk);
	clk->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

/* Disable a rate-settable clock. */
void rcg_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *clk = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_disable_reg(clk);
	clk->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/*
 * Frequency-related functions
 */

/* Set a clock to an exact rate. */
int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *nf, *cf;
	struct clk *chld;
	int rc = 0;

	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	/* Check if frequency is actually changed. */
	cf = clk->current_freq;
	if (nf == cf)
		return 0;

	if (clk->enabled) {
		/* Enable source clock dependency for the new freq. */
		rc = clk_enable(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock(&local_clock_reg_lock);

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (!clk->bank_info) {
		/* Disable all branches to prevent glitches. */
		list_for_each_entry(chld, &clk->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			/*
			 * We don't need to grab the child's lock because
			 * we hold the local_clock_reg_lock and 'enabled' is
			 * only modified while the lock is held.
			 */
			if (x->enabled)
				__branch_clk_disable_reg(&x->b, x->c.dbg_name);
		}
		if (clk->enabled)
			__rcg_clk_disable_reg(clk);
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!clk->set_rate);
	clk->set_rate(clk, nf);

	/*
	 * Current freq must be updated before __rcg_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	clk->current_freq = nf;

	/* Enable any clocks that were disabled. */
	if (!clk->bank_info) {
		if (clk->enabled)
			__rcg_clk_enable_reg(clk);
		/* Enable only branches that were ON before. */
		list_for_each_entry(chld, &clk->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			if (x->enabled)
				__branch_clk_enable_reg(&x->b, x->c.dbg_name);
		}
	}

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (clk->enabled)
		clk_disable(cf->src_clk);

	return rc;
}
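
/*
 * Illustrative usage sketch (hypothetical consumer, not part of this
 * file): a driver changes rates through the generic clk API, which
 * reaches this function via the clk_ops set_rate hook. The connection
 * id "core_clk" and the rate are made up for the example:
 *
 *	struct clk *c = clk_get(dev, "core_clk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, 48000000);
 *		clk_enable(c);
 *	}
 */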

/* Get the currently-set rate of a clock in Hz. */
unsigned long rcg_clk_get_rate(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	unsigned long flags;
	unsigned ret = 0;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ret = clk->current_freq->freq_hz;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/*
	 * Return 0 if the rate has never been set. Might not be correct,
	 * but it's good enough.
	 */
	if (ret == FREQ_END)
		ret = 0;

	return ret;
}

/* Check if a clock is currently enabled. */
int rcg_clk_is_enabled(struct clk *clk)
{
	return to_rcg_clk(clk)->enabled;
}

/* Return a supported rate that's at least the specified rate. */
long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}
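
/*
 * Worked example (hypothetical table): with freq_tbl rates of
 * {19200000, 48000000, 64000000}, rcg_clk_round_rate(c, 20000000)
 * returns 48000000, and any request above 64000000 yields -EPERM.
 * Note this assumes the table is sorted in ascending order.
 */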

bool local_clk_is_local(struct clk *clk)
{
	return true;
}

/* Return the nth supported frequency for a given clock. */
int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *clk = to_rcg_clk(c);

	if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (clk->freq_tbl + n)->freq_hz;
}

struct clk *rcg_clk_get_parent(struct clk *clk)
{
	return to_rcg_clk(clk)->current_freq->src_clk;
}

/* Disable hw clock gating if not set at boot */
static void branch_handoff(struct branch *clk, struct clk *c)
{
	if (!branch_in_hwcg_mode(clk)) {
		clk->hwcg_mask = 0;
		c->flags &= ~CLKFLAG_HWCG;
	} else {
		c->flags |= CLKFLAG_HWCG;
	}
}

int branch_clk_handoff(struct clk *c)
{
	struct branch_clk *clk = to_branch_clk(c);
	branch_handoff(&clk->b, &clk->c);
	return 0;
}

int rcg_clk_handoff(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	uint32_t ctl_val, ns_val, md_val, ns_mask;
	struct clk_freq_tbl *freq;

	branch_handoff(&clk->b, &clk->c);

	ctl_val = readl_relaxed(clk->b.ctl_reg);
	if (!(ctl_val & clk->root_en_mask))
		return 0;

	if (clk->bank_info) {
		const struct bank_masks *bank_masks = clk->bank_info;
		const struct bank_mask_info *bank_info;
		if (!(ctl_val & bank_masks->bank_sel_mask))
			bank_info = &bank_masks->bank0_mask;
		else
			bank_info = &bank_masks->bank1_mask;

		ns_mask = bank_info->ns_mask;
		md_val = bank_info->md_reg ?
				readl_relaxed(bank_info->md_reg) : 0;
	} else {
		ns_mask = clk->ns_mask;
		md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
	}

	ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
	for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if ((freq->ns_val & ns_mask) == ns_val &&
		    (!freq->mnd_en_mask || freq->md_val == md_val)) {
			pr_info("%s rate=%d\n", clk->c.dbg_name, freq->freq_hz);
			break;
		}
	}
	if (freq->freq_hz == FREQ_END)
		return 0;

	clk->current_freq = freq;

	return 1;
}

int pll_vote_clk_enable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(pll->en_reg);
	ena |= pll->en_mask;
	writel_relaxed(ena, pll->en_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait until PLL is enabled */
	while ((readl_relaxed(pll->status_reg) & BIT(16)) == 0)
		cpu_relax();

	return 0;
}

void pll_vote_clk_disable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(pll->en_reg);
	ena &= ~(pll->en_mask);
	writel_relaxed(ena, pll->en_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

unsigned long pll_vote_clk_get_rate(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->rate;
}

struct clk *pll_vote_clk_get_parent(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->parent;
}

int pll_vote_clk_is_enabled(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return !!(readl_relaxed(pll->status_reg) & BIT(16));
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.auto_off = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_rate = pll_vote_clk_get_rate,
	.get_parent = pll_vote_clk_get_parent,
	.is_local = local_clk_is_local,
};
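
/*
 * Sketch of how a SoC clock table might instantiate a voteable PLL
 * (every name and register below is hypothetical; real definitions
 * live in the SoC-specific clock files, e.g. clock-8x60.c):
 *
 *	static struct pll_vote_clk pll8_clk = {
 *		.rate = 384000000,
 *		.en_reg = SOME_PLL_ENA_REG,
 *		.en_mask = BIT(8),
 *		.status_reg = SOME_PLL_STATUS_REG,
 *		.c = {
 *			.dbg_name = "pll8_clk",
 *			.ops = &clk_ops_pll_vote,
 *			CLK_INIT(pll8_clk.c),
 *		},
 *	};
 */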

static int pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	/* Disable PLL bypass mode. */
	mode |= BIT(1);
	writel_relaxed(mode, pll->mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= BIT(2);
	writel_relaxed(mode, pll->mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= BIT(0);
	writel_relaxed(mode, pll->mode_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return 0;
}

static void pll_clk_disable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	mode &= ~BM(3, 0);
	writel_relaxed(mode, pll->mode_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static unsigned long pll_clk_get_rate(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->rate;
}

static struct clk *pll_clk_get_parent(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->parent;
}

int sr_pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	/* De-assert active-low PLL reset. */
	mode |= BIT(2);
	writel_relaxed(mode, pll->mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= BIT(1);
	writel_relaxed(mode, pll->mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= BIT(0);
	writel_relaxed(mode, pll->mode_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return 0;
}

struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.auto_off = pll_clk_disable,
	.get_rate = pll_clk_get_rate,
	.get_parent = pll_clk_get_parent,
	.is_local = local_clk_is_local,
};

struct clk_ops clk_ops_gnd = {
	.get_rate = fixed_clk_get_rate,
	.is_local = local_clk_is_local,
};

struct fixed_clk gnd_clk = {
	.c = {
		.dbg_name = "ground_clk",
		.ops = &clk_ops_gnd,
		CLK_INIT(gnd_clk.c),
	},
};

struct clk_ops clk_ops_measure = {
	.is_local = local_clk_is_local,
};

int branch_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
	branch->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

void branch_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
	branch->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

struct clk *branch_clk_get_parent(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	return branch->parent;
}

int branch_clk_set_parent(struct clk *clk, struct clk *parent)
{
	/*
	 * We set up the parent pointer at init time in msm_clock_init().
	 * This check is to make sure drivers can't change the parent.
	 */
	if (parent && list_empty(&clk->siblings)) {
		list_add(&clk->siblings, &parent->children);
		return 0;
	}
	return -EINVAL;
}

int branch_clk_is_enabled(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	return branch->enabled;
}

static void branch_enable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val |= b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_disable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val &= ~b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

void branch_clk_enable_hwcg(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	branch_enable_hwcg(&branch->b);
}

void branch_clk_disable_hwcg(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	branch_disable_hwcg(&branch->b);
}

static int branch_set_flags(struct branch *b, unsigned flags)
{
	unsigned long irq_flags;
	u32 reg_val;
	int ret = 0;

	if (!b->retain_reg)
		return -EPERM;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	reg_val = readl_relaxed(b->retain_reg);
	switch (flags) {
	case CLKFLAG_RETAIN:
		reg_val |= b->retain_mask;
		break;
	case CLKFLAG_NORETAIN:
		reg_val &= ~b->retain_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->retain_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}

int branch_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_branch_clk(clk)->b, flags);
}

int branch_clk_in_hwcg_mode(struct clk *c)
{
	struct branch_clk *clk = to_branch_clk(c);
	return branch_in_hwcg_mode(&clk->b);
}

void rcg_clk_enable_hwcg(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	branch_enable_hwcg(&rcg->b);
}

void rcg_clk_disable_hwcg(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	branch_disable_hwcg(&rcg->b);
}

int rcg_clk_in_hwcg_mode(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	return branch_in_hwcg_mode(&clk->b);
}

int rcg_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_rcg_clk(clk)->b, flags);
}

int branch_reset(struct branch *b, enum clk_reset_action action)
{
	int ret = 0;
	u32 reg_val;
	unsigned long flags;

	if (!b->reset_reg)
		return -EPERM;

	/* Disable hw gating when asserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_ASSERT)
		branch_disable_hwcg(b);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	/* Assert/Deassert reset */
	reg_val = readl_relaxed(b->reset_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= b->reset_mask;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~b->reset_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->reset_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Enable hw gating when deasserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_DEASSERT)
		branch_enable_hwcg(b);
	/* Make sure write is issued before returning. */
	mb();
	return ret;
}
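
/*
 * Illustrative sketch (hypothetical consumer): a driver typically
 * cycles its block through reset with the clk_reset() wrapper from
 * <mach/clk.h>, which dispatches to the reset hooks below:
 *
 *	clk_reset(core_clk, CLK_RESET_ASSERT);
 *	udelay(5);
 *	clk_reset(core_clk, CLK_RESET_DEASSERT);
 *
 * The 5 us settle time is an example value, not a requirement taken
 * from this file.
 */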

int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return branch_reset(&to_branch_clk(clk)->b, action);
}

int rcg_clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return branch_reset(&to_rcg_clk(clk)->b, action);
}

static int cdiv_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct cdiv_clk *clk = to_cdiv_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static void cdiv_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cdiv_clk *clk = to_cdiv_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	u32 reg_val;

	if (rate > clk->max_div)
		return -EINVAL;
	/* Check if the divider is actually changed. */
	if (rate == clk->cur_div)
		return 0;

	spin_lock(&local_clock_reg_lock);
	reg_val = readl_relaxed(clk->ns_reg);
	reg_val &= ~(clk->ext_mask | (clk->max_div - 1) << clk->div_offset);
	/* Non-zero rates mean set a divider, zero means use external input */
	if (rate)
		reg_val |= (rate - 1) << clk->div_offset;
	else
		reg_val |= clk->ext_mask;
	writel_relaxed(reg_val, clk->ns_reg);
	spin_unlock(&local_clock_reg_lock);

	clk->cur_div = rate;
	return 0;
}
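
/*
 * Worked example (hypothetical values): with max_div = 16 and
 * div_offset = 2, cdiv_clk_set_rate(c, 4) clears bits [5:2] plus the
 * external-input select bit and writes (4 - 1) << 2, i.e. a divide-by-4.
 * A rate of 0 selects the external source via ext_mask instead.
 */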

static unsigned long cdiv_clk_get_rate(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return clk->cur_div;
}

static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return rate > clk->max_div ? -EPERM : rate;
}

static int cdiv_clk_list_rate(struct clk *c, unsigned n)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return n > clk->max_div ? -ENXIO : n;
}

static int cdiv_clk_handoff(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	u32 reg_val;

	branch_handoff(&clk->b, &clk->c);

	reg_val = readl_relaxed(clk->ns_reg);
	if (reg_val & clk->ext_mask) {
		clk->cur_div = 0;
	} else {
		reg_val >>= clk->div_offset;
		clk->cur_div = (reg_val & (clk->max_div - 1)) + 1;
	}

	return 0;
}

static void cdiv_clk_enable_hwcg(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	branch_enable_hwcg(&clk->b);
}

static void cdiv_clk_disable_hwcg(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	branch_disable_hwcg(&clk->b);
}

static int cdiv_clk_in_hwcg_mode(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return branch_in_hwcg_mode(&clk->b);
}

struct clk_ops clk_ops_cdiv = {
	.enable = cdiv_clk_enable,
	.disable = cdiv_clk_disable,
	.in_hwcg_mode = cdiv_clk_in_hwcg_mode,
	.enable_hwcg = cdiv_clk_enable_hwcg,
	.disable_hwcg = cdiv_clk_disable_hwcg,
	.auto_off = cdiv_clk_disable,
	.handoff = cdiv_clk_handoff,
	.set_rate = cdiv_clk_set_rate,
	.get_rate = cdiv_clk_get_rate,
	.list_rate = cdiv_clk_list_rate,
	.round_rate = cdiv_clk_round_rate,
	.is_local = local_clk_is_local,
};