/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>
#include <mach/clk.h>
#include <mach/scm-io.h>

#include "clock.h"
#include "clock-local.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

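/*
 * Illustrative sketch (not part of the original file): how a branch might
 * opt into one of the halt-check strategies handled below. The register
 * names and bit positions here are hypothetical; the halt_check values
 * are the ones this file acts on.
 *
 *	.b = {
 *		.ctl_reg = EXAMPLE_CLK_CTL_REG,		// hypothetical
 *		.en_mask = BIT(4),
 *		.halt_reg = EXAMPLE_HALT_STATUS_REG,	// hypothetical
 *		.halt_check = HALT,	// poll up to HALT_CHECK_MAX_LOOPS
 *		.halt_bit = 7,
 *	},
 */
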
DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

/*
 * Common Set-Rate Functions
 */

/* For clocks with MND dividers. */
void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, ctl_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl_relaxed(clk->ns_reg);
	ns_reg_val |= BIT(7);
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, clk->md_reg);

	/* If the clock has a separate CC register, program it. */
	if (clk->ns_reg != clk->b.ctl_reg) {
		ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
		ctl_reg_val &= ~(clk->ctl_mask);
		ctl_reg_val |= nf->ctl_val;
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~BIT(7);
	writel_relaxed(ns_reg_val, clk->ns_reg);
}
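
/*
 * Illustrative sketch (not from the original source): the clk_freq_tbl
 * fields that set_rate_mnd() consumes. The encodings below are made-up
 * values; real tables are terminated with F_END, as the lookups in this
 * file assume.
 *
 *	static struct clk_freq_tbl example_freq = {
 *		.freq_hz = 48000000,
 *		.src_clk = &example_pll.c,	// hypothetical source
 *		.md_val = 0x000100FB,		// hypothetical M/D encoding
 *		.ns_val = 0x00FF0143,		// hypothetical N/src/divider
 *		.ctl_val = 0x20,		// hypothetical MN mode bits
 *	};
 */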

void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	/*
	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
	 * in NS registers are applied in the enable path, since power can be
	 * saved by leaving an un-clocked or slowly-clocked source selected
	 * until the clock is enabled.
	 */
}

void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	uint32_t ctl_reg_val;

	/* Assert MND reset. */
	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
	ctl_reg_val |= BIT(8);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, clk->md_reg);

	/* Program MN counter Enable and Mode. */
	ctl_reg_val &= ~(clk->ctl_mask);
	ctl_reg_val |= nf->ctl_val;
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Deassert MND reset. */
	ctl_reg_val &= ~BIT(8);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
}

void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = clk->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, ctl_reg_val;
	uint32_t bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	ns_reg_val = readl_relaxed(clk->ns_reg);

	/* Assert bank MND reset. */
	ns_reg_val |= new_bank_masks->rst_mask;
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (clk->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	writel_relaxed(nf->md_val, new_bank_masks->md_reg);

	/* Enable counter only if clock is enabled. */
	if (clk->enabled)
		ctl_reg_val |= new_bank_masks->mnd_en_mask;
	else
		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);

	ctl_reg_val &= ~(new_bank_masks->mode_mask);
	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
	writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

	/* Deassert bank MND reset. */
	ns_reg_val &= ~(new_bank_masks->rst_mask);
	writel_relaxed(ns_reg_val, clk->ns_reg);

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (clk->enabled && clk->current_freq->freq_hz) {
		ctl_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Disable old bank's MN counter. */
		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
		writel_relaxed(ctl_reg_val, clk->b.ctl_reg);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/* Update the MND_EN and NS masks to match the current bank. */
	clk->mnd_en_mask = new_bank_masks->mnd_en_mask;
	clk->ns_mask = new_bank_masks->ns_mask;
}

void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = clk->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ns_reg_val = readl_relaxed(clk->ns_reg);
	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (clk->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/*
	 * Switch to the new bank if clock is running. If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (clk->enabled && clk->current_freq->freq_hz) {
		ns_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ns_reg_val, clk->ns_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, clk->ns_reg);
	}

	/* Update the NS mask to match the current bank. */
	clk->ns_mask = new_bank_masks->ns_mask;
}
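
/*
 * Illustrative sketch (all values hypothetical): the bank_info layout the
 * two banked set-rate functions above rely on. Each bank carries its own
 * NS/MD masks so one bank can be reprogrammed while the other keeps the
 * clock running.
 *
 *	static struct bank_masks example_banks = {
 *		.bank_sel_mask = BIT(11),		// hypothetical
 *		.bank0_mask = {
 *			.md_reg = EXAMPLE_MD0_REG,	// hypothetical
 *			.ns_mask = BM(29, 26) | BM(21, 19),
 *			.rst_mask = BIT(31),
 *			.mnd_en_mask = BIT(8),
 *			.mode_mask = BM(10, 9),
 *		},
 *		.bank1_mask = {
 *			// same shape, bank-1 registers/bits
 *		},
 *	};
 */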

/*
 * Clock enable/disable functions
 */

/* Return non-zero if a clock's status register shows the clock is halted. */
static int branch_clk_is_halted(const struct branch *clk)
{
	int invert = (clk->halt_check == ENABLE);
	int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
	return invert ? !status_bit : status_bit;
}

int branch_in_hwcg_mode(const struct branch *b)
{
	if (!b->hwcg_mask)
		return 0;

	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}

void __branch_clk_enable_reg(const struct branch *clk, const char *name)
{
	u32 reg_val;

	if (clk->en_mask) {
		reg_val = readl_relaxed(clk->ctl_reg);
		reg_val |= clk->en_mask;
		writel_relaxed(reg_val, clk->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch enable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(clk))
		return;

	/* Wait for clock to enable before returning. */
	if (clk->halt_check == DELAY)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT
			|| clk->halt_check == ENABLE_VOTED
			|| clk->halt_check == HALT_VOTED) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'off'", name);
	}
}

/* Perform any register operations required to enable the clock. */
static void __rcg_clk_enable_reg(struct rcg_clk *clk)
{
	u32 reg_val;
	void __iomem *const reg = clk->b.ctl_reg;

	WARN(clk->current_freq == &rcg_dummy_freq,
		"Attempting to enable %s before setting its rate. "
		"Set the rate first!\n", clk->c.dbg_name);

	/*
	 * Program the NS register, if applicable. NS registers are not
	 * set in the set_rate path because power can be saved by deferring
	 * the selection of a clocked source until the clock is enabled.
	 */
	if (clk->ns_mask) {
		reg_val = readl_relaxed(clk->ns_reg);
		reg_val &= ~(clk->ns_mask);
		reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
		writel_relaxed(reg_val, clk->ns_reg);
	}

	/* Enable MN counter, if applicable. */
	reg_val = readl_relaxed(reg);
	if (clk->current_freq->md_val) {
		reg_val |= clk->mnd_en_mask;
		writel_relaxed(reg_val, reg);
	}
	/* Enable root. */
	if (clk->root_en_mask) {
		reg_val |= clk->root_en_mask;
		writel_relaxed(reg_val, reg);
	}
	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
}

/* Perform any register operations required to disable the branch. */
u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
{
	u32 reg_val;

	reg_val = readl_relaxed(clk->ctl_reg);
	if (clk->en_mask) {
		reg_val &= ~(clk->en_mask);
		writel_relaxed(reg_val, clk->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(clk))
		return reg_val;

	/* Wait for clock to disable before continuing. */
	if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
			|| clk->halt_check == HALT_VOTED)
		udelay(HALT_CHECK_DELAY_US);
	else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'on'", name);
	}

	return reg_val;
}

/* Perform any register operations required to disable the generator. */
static void __rcg_clk_disable_reg(struct rcg_clk *clk)
{
	void __iomem *const reg = clk->b.ctl_reg;
	uint32_t reg_val;

	reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
	/* Disable root. */
	if (clk->root_en_mask) {
		reg_val &= ~(clk->root_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (clk->current_freq->md_val) {
		reg_val &= ~(clk->mnd_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/*
	 * Program NS register to low-power value with an un-clocked or
	 * slowly-clocked source selected.
	 */
	if (clk->ns_mask) {
		reg_val = readl_relaxed(clk->ns_reg);
		reg_val &= ~(clk->ns_mask);
		reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
		writel_relaxed(reg_val, clk->ns_reg);
	}
}

/* Enable a rate-settable clock. */
int rcg_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *clk = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_enable_reg(clk);
	clk->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

/* Disable a rate-settable clock. */
void rcg_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *clk = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_disable_reg(clk);
	clk->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/*
 * Frequency-related functions
 */

/* Set a clock to an exact rate. */
int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *nf, *cf;
	struct clk *chld;
	int rc = 0;

	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	/* Check if the frequency is actually changing. */
	cf = clk->current_freq;
	if (nf == cf)
		return 0;

	if (clk->enabled) {
		/* Enable source clock dependency for the new freq. */
		rc = clk_enable(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock(&local_clock_reg_lock);

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (!clk->bank_info) {
		/* Disable all branches to prevent glitches. */
		list_for_each_entry(chld, &clk->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			/*
			 * We don't need to grab the child's lock because
			 * we hold the local_clock_reg_lock and 'enabled' is
			 * only modified within this lock.
			 */
			if (x->enabled)
				__branch_clk_disable_reg(&x->b, x->c.dbg_name);
		}
		if (clk->enabled)
			__rcg_clk_disable_reg(clk);
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!clk->set_rate);
	clk->set_rate(clk, nf);

	/*
	 * Current freq must be updated before __rcg_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	clk->current_freq = nf;

	/* Enable any clocks that were disabled. */
	if (!clk->bank_info) {
		if (clk->enabled)
			__rcg_clk_enable_reg(clk);
		/* Enable only branches that were ON before. */
		list_for_each_entry(chld, &clk->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			if (x->enabled)
				__branch_clk_enable_reg(&x->b, x->c.dbg_name);
		}
	}

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (clk->enabled)
		clk_disable(cf->src_clk);

	return rc;
}
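
/*
 * Illustrative call order from a driver's perspective (the device and
 * clock name are hypothetical). The rate must be set before the first
 * enable, or __rcg_clk_enable_reg() will WARN about the dummy frequency:
 *
 *	clk = clk_get(dev, "example_uart_clk");	// hypothetical name
 *	clk_set_rate(clk, 1843200);
 *	clk_enable(clk);
 */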

/* Check if a clock is currently enabled. */
int rcg_clk_is_enabled(struct clk *clk)
{
	return to_rcg_clk(clk)->enabled;
}

/* Return a supported rate that's at least the specified rate. */
long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}
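
/*
 * Worked example (hypothetical table): given supported rates {19200000,
 * 48000000}, rcg_clk_round_rate(c, 20000000) returns 48000000, and a
 * request above the highest table entry returns -EPERM.
 */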

bool local_clk_is_local(struct clk *clk)
{
	return true;
}

/* Return the nth supported frequency for a given clock. */
int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *clk = to_rcg_clk(c);

	if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (clk->freq_tbl + n)->freq_hz;
}

struct clk *rcg_clk_get_parent(struct clk *clk)
{
	return to_rcg_clk(clk)->current_freq->src_clk;
}

/* Disable hw clock gating if it was not set at boot */
enum handoff branch_handoff(struct branch *clk, struct clk *c)
{
	if (!branch_in_hwcg_mode(clk)) {
		clk->hwcg_mask = 0;
		c->flags &= ~CLKFLAG_HWCG;
		if (readl_relaxed(clk->ctl_reg) & clk->en_mask)
			return HANDOFF_ENABLED_CLK;
	} else {
		c->flags |= CLKFLAG_HWCG;
	}
	return HANDOFF_DISABLED_CLK;
}

enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *clk = to_branch_clk(c);
	return branch_handoff(&clk->b, &clk->c);
}

enum handoff rcg_clk_handoff(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	uint32_t ctl_val, ns_val, md_val, ns_mask;
	struct clk_freq_tbl *freq;
	enum handoff ret;

	ctl_val = readl_relaxed(clk->b.ctl_reg);
	ret = branch_handoff(&clk->b, &clk->c);
	if (ret == HANDOFF_DISABLED_CLK)
		return HANDOFF_DISABLED_CLK;

	if (clk->bank_info) {
		const struct bank_masks *bank_masks = clk->bank_info;
		const struct bank_mask_info *bank_info;
		if (!(ctl_val & bank_masks->bank_sel_mask))
			bank_info = &bank_masks->bank0_mask;
		else
			bank_info = &bank_masks->bank1_mask;

		ns_mask = bank_info->ns_mask;
		md_val = bank_info->md_reg ?
				readl_relaxed(bank_info->md_reg) : 0;
	} else {
		ns_mask = clk->ns_mask;
		md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
	}
	if (!ns_mask)
		return HANDOFF_UNKNOWN_RATE;
	ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
	for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if ((freq->ns_val & ns_mask) == ns_val &&
		    (!freq->md_val || freq->md_val == md_val)) {
			pr_info("%s rate=%d\n", clk->c.dbg_name, freq->freq_hz);
			break;
		}
	}
	if (freq->freq_hz == FREQ_END)
		return HANDOFF_UNKNOWN_RATE;

	clk->current_freq = freq;
	c->rate = freq->freq_hz;

	return HANDOFF_ENABLED_CLK;
}

int pll_vote_clk_enable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(pll->en_reg);
	ena |= pll->en_mask;
	writel_relaxed(ena, pll->en_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait until PLL is enabled */
	while ((readl_relaxed(pll->status_reg) & BIT(16)) == 0)
		cpu_relax();

	return 0;
}

void pll_vote_clk_disable(struct clk *clk)
{
	u32 ena;
	unsigned long flags;
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(pll->en_reg);
	ena &= ~(pll->en_mask);
	writel_relaxed(ena, pll->en_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

struct clk *pll_vote_clk_get_parent(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return pll->parent;
}

int pll_vote_clk_is_enabled(struct clk *clk)
{
	struct pll_vote_clk *pll = to_pll_vote_clk(clk);
	return !!(readl_relaxed(pll->status_reg) & BIT(16));
}

struct clk_ops clk_ops_pll_vote = {
	.enable = pll_vote_clk_enable,
	.disable = pll_vote_clk_disable,
	.auto_off = pll_vote_clk_disable,
	.is_enabled = pll_vote_clk_is_enabled,
	.get_parent = pll_vote_clk_get_parent,
	.is_local = local_clk_is_local,
};
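
/*
 * Illustrative sketch (register names and parent hypothetical): a
 * voteable PLL wired to the ops above, following the same CLK_INIT
 * pattern used by gnd_clk below.
 *
 *	static struct pll_vote_clk example_pll_clk = {
 *		.en_reg = EXAMPLE_PLL_ENA_REG,		// hypothetical
 *		.en_mask = BIT(8),
 *		.status_reg = EXAMPLE_PLL_STATUS_REG,	// hypothetical
 *		.parent = &example_xo_clk.c,		// hypothetical parent
 *		.c = {
 *			.dbg_name = "example_pll_clk",
 *			.ops = &clk_ops_pll_vote,
 *			CLK_INIT(example_pll_clk.c),
 *		},
 *	};
 */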

static int pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	/* Disable PLL bypass mode. */
	mode |= BIT(1);
	writel_relaxed(mode, pll->mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	mode |= BIT(2);
	writel_relaxed(mode, pll->mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(50);

	/* Enable PLL output. */
	mode |= BIT(0);
	writel_relaxed(mode, pll->mode_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return 0;
}

static void pll_clk_disable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	/*
	 * Disable the PLL output, disable test mode, enable
	 * the bypass mode, and assert the reset.
	 */
	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	mode &= ~BM(3, 0);
	writel_relaxed(mode, pll->mode_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static struct clk *pll_clk_get_parent(struct clk *clk)
{
	struct pll_clk *pll = to_pll_clk(clk);
	return pll->parent;
}

int sr_pll_clk_enable(struct clk *clk)
{
	u32 mode;
	unsigned long flags;
	struct pll_clk *pll = to_pll_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	mode = readl_relaxed(pll->mode_reg);
	/* De-assert active-low PLL reset. */
	mode |= BIT(2);
	writel_relaxed(mode, pll->mode_reg);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* Disable PLL bypass mode. */
	mode |= BIT(1);
	writel_relaxed(mode, pll->mode_reg);

	/* Wait until PLL is locked. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	mode |= BIT(0);
	writel_relaxed(mode, pll->mode_reg);

	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return 0;
}

struct clk_ops clk_ops_pll = {
	.enable = pll_clk_enable,
	.disable = pll_clk_disable,
	.auto_off = pll_clk_disable,
	.get_parent = pll_clk_get_parent,
	.is_local = local_clk_is_local,
};

struct clk_ops clk_ops_gnd = {
	.is_local = local_clk_is_local,
};

struct fixed_clk gnd_clk = {
	.c = {
		.dbg_name = "ground_clk",
		.ops = &clk_ops_gnd,
		CLK_INIT(gnd_clk.c),
	},
};

struct clk_ops clk_ops_measure = {
	.is_local = local_clk_is_local,
};

int branch_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
	branch->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

void branch_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(clk);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
	branch->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

struct clk *branch_clk_get_parent(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	return branch->parent;
}

int branch_clk_set_parent(struct clk *clk, struct clk *parent)
{
	/*
	 * We set up the parent pointer at init time in msm_clock_init().
	 * This check is to make sure drivers can't change the parent.
	 */
	if (parent && list_empty(&clk->siblings)) {
		list_add(&clk->siblings, &parent->children);
		return 0;
	}
	return -EINVAL;
}

int branch_clk_is_enabled(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	return branch->enabled;
}

static void branch_enable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val |= b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_disable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val &= ~b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

void branch_clk_enable_hwcg(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	branch_enable_hwcg(&branch->b);
}

void branch_clk_disable_hwcg(struct clk *clk)
{
	struct branch_clk *branch = to_branch_clk(clk);
	branch_disable_hwcg(&branch->b);
}

static int branch_set_flags(struct branch *b, unsigned flags)
{
	unsigned long irq_flags;
	u32 reg_val;
	int ret = 0;

	if (!b->retain_reg)
		return -EPERM;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	reg_val = readl_relaxed(b->retain_reg);
	switch (flags) {
	case CLKFLAG_RETAIN:
		reg_val |= b->retain_mask;
		break;
	case CLKFLAG_NORETAIN:
		reg_val &= ~b->retain_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->retain_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}

int branch_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_branch_clk(clk)->b, flags);
}
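
/*
 * Example (sketch): a driver that wants a core's memories to retain state
 * while its clock is off would request, via the generic flag API:
 *
 *	clk_set_flags(clk, CLKFLAG_RETAIN);	// assumes clk_set_flags()
 *						// dispatches to the op above
 */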

int branch_clk_in_hwcg_mode(struct clk *c)
{
	struct branch_clk *clk = to_branch_clk(c);
	return branch_in_hwcg_mode(&clk->b);
}

void rcg_clk_enable_hwcg(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	branch_enable_hwcg(&rcg->b);
}

void rcg_clk_disable_hwcg(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	branch_disable_hwcg(&rcg->b);
}

int rcg_clk_in_hwcg_mode(struct clk *c)
{
	struct rcg_clk *clk = to_rcg_clk(c);
	return branch_in_hwcg_mode(&clk->b);
}

int rcg_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_rcg_clk(clk)->b, flags);
}

int branch_reset(struct branch *b, enum clk_reset_action action)
{
	int ret = 0;
	u32 reg_val;
	unsigned long flags;

	if (!b->reset_reg)
		return -EPERM;

	/* Disable hw gating when asserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_ASSERT)
		branch_disable_hwcg(b);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	/* Assert/Deassert reset */
	reg_val = readl_relaxed(b->reset_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= b->reset_mask;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~b->reset_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->reset_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Enable hw gating when deasserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_DEASSERT)
		branch_enable_hwcg(b);
	/* Make sure write is issued before returning. */
	mb();
	return ret;
}
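
/*
 * Example (sketch): cycling a block through reset with the action values
 * handled above; clk_reset() is assumed to dispatch to branch_clk_reset()
 * for branch clocks, and the hold time is hypothetical:
 *
 *	clk_reset(clk, CLK_RESET_ASSERT);
 *	udelay(1);
 *	clk_reset(clk, CLK_RESET_DEASSERT);
 */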

int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return branch_reset(&to_branch_clk(clk)->b, action);
}

int rcg_clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return branch_reset(&to_rcg_clk(clk)->b, action);
}

static int cdiv_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct cdiv_clk *clk = to_cdiv_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static void cdiv_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct cdiv_clk *clk = to_cdiv_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	u32 reg_val;

	if (rate > clk->max_div)
		return -EINVAL;
	/* Check if the divider is actually changing. */
	if (rate == clk->cur_div)
		return 0;

	spin_lock(&local_clock_reg_lock);
	reg_val = readl_relaxed(clk->ns_reg);
	reg_val &= ~(clk->ext_mask | (clk->max_div - 1) << clk->div_offset);
	/* Non-zero rates mean set a divider; zero means use external input. */
	if (rate)
		reg_val |= (rate - 1) << clk->div_offset;
	else
		reg_val |= clk->ext_mask;
	writel_relaxed(reg_val, clk->ns_reg);
	spin_unlock(&local_clock_reg_lock);

	clk->cur_div = rate;
	return 0;
}
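
/*
 * Worked example: for a cdiv clock, the "rate" is the divider itself.
 * clk_set_rate(c, 4) programs divide-by-4 (written to the register as 3),
 * while clk_set_rate(c, 0) selects the external input via ext_mask.
 */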

static unsigned long cdiv_clk_get_rate(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return clk->cur_div;
}

static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return rate > clk->max_div ? -EPERM : rate;
}

static int cdiv_clk_list_rate(struct clk *c, unsigned n)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return n > clk->max_div ? -ENXIO : n;
}

static enum handoff cdiv_clk_handoff(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	enum handoff ret;
	u32 reg_val;

	ret = branch_handoff(&clk->b, &clk->c);
	if (ret == HANDOFF_DISABLED_CLK)
		return ret;

	reg_val = readl_relaxed(clk->ns_reg);
	if (reg_val & clk->ext_mask) {
		clk->cur_div = 0;
	} else {
		reg_val >>= clk->div_offset;
		clk->cur_div = (reg_val & (clk->max_div - 1)) + 1;
	}

	return HANDOFF_ENABLED_CLK;
}

static void cdiv_clk_enable_hwcg(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	branch_enable_hwcg(&clk->b);
}

static void cdiv_clk_disable_hwcg(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	branch_disable_hwcg(&clk->b);
}

static int cdiv_clk_in_hwcg_mode(struct clk *c)
{
	struct cdiv_clk *clk = to_cdiv_clk(c);
	return branch_in_hwcg_mode(&clk->b);
}

struct clk_ops clk_ops_cdiv = {
	.enable = cdiv_clk_enable,
	.disable = cdiv_clk_disable,
	.in_hwcg_mode = cdiv_clk_in_hwcg_mode,
	.enable_hwcg = cdiv_clk_enable_hwcg,
	.disable_hwcg = cdiv_clk_disable_hwcg,
	.auto_off = cdiv_clk_disable,
	.handoff = cdiv_clk_handoff,
	.set_rate = cdiv_clk_set_rate,
	.get_rate = cdiv_clk_get_rate,
	.list_rate = cdiv_clk_list_rate,
	.round_rate = cdiv_clk_round_rate,
	.is_local = local_clk_is_local,
};