/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>

#include "clock.h"
#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	200

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)
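
/*
 * The offsets above imply the standard RCG register layout (a sketch
 * inferred from these macros, not from a hardware spec):
 *
 *	cmd_rcgr_reg + 0x00: CMD_RCGR (update/root-enable/status bits)
 *	cmd_rcgr_reg + 0x04: CFG_RCGR (source select and pre-divider)
 *	cmd_rcgr_reg + 0x08: M counter value
 *	cmd_rcgr_reg + 0x0C: N counter value
 *	cmd_rcgr_reg + 0x10: D counter value
 *
 * CBCR, BCR and VOTE registers live at independent per-clock offsets.
 */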

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_BRANCH_CDIV_MASK		BM(24, 16)
#define CBCR_BRANCH_CDIV_MASKED(val)	BVAL(24, 16, (val))
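
/*
 * BM() and BVAL() come from clock.h. A sketch of their semantics, assuming
 * the usual mach-msm definitions: BM(msb, lsb) builds a mask covering bits
 * [msb:lsb], and BVAL(msb, lsb, val) places val into that field. For
 * example, BVAL(13, 12, 0x2) == 0x2000.
 */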

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
}
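
/*
 * Worked example for the half-integer divider path (a sketch; it assumes
 * frequency tables are built with the usual F() helper from clock-local2.h,
 * which encodes a divider "div" as 2*div - 1 in CFG_RCGR[4:0]):
 *
 *	src = 600 MHz, div = 1.5  ->  CFG_RCGR[4:0] = 2*1.5 - 1 = 2
 *	output = 600 MHz / 1.5 = 400 MHz
 */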

/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;

	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
}
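
/*
 * M/N:D arithmetic sketch (assuming the F() helper's encoding, where
 * m_val = m, n_val = ~(n - m) and d_val = ~n for a dual-edge counter):
 * the output rate is the pre-divided source rate scaled by m/n. For
 * example, m/n = 1/3 on a 300 MHz source yields 100 MHz, with
 * n_val = ~(3 - 1) = 0xFFFFFFFD and d_val = ~3 = 0xFFFFFFFC written to
 * the N and D registers.
 */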

static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	int rc;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new freq. */
	if (c->prepare_count) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (c->count) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	BUG_ON(!rcg->set_rate);

	/*
	 * Interrupts are already disabled while c->lock is held, so take
	 * the register lock without re-saving flags (re-using "flags" for
	 * a nested irqsave would clobber the state saved by the outer lock).
	 */
	spin_lock(&local_clock_reg_lock);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (c->count)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (c->prepare_count)
		clk_unprepare(cf->src_clk);

	rcg->current_freq = nf;

	return 0;
}
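
/*
 * Typical consumer-side call order (a sketch using the standard kernel
 * clk API; "core_clk" and the rate are hypothetical). The rate is set
 * before clk_prepare(), matching the WARN in rcg_clk_prepare():
 *
 *	struct clk *clk = clk_get(dev, "core_clk");
 *	clk_set_rate(clk, 100000000);
 *	clk_prepare(clk);
 *	clk_enable(clk);
 */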

/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return to_rcg_clk(c)->current_freq->src_clk;
}

static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	/* Is there a pending configuration? */
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return HANDOFF_UNKNOWN_RATE;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
			| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return HANDOFF_UNKNOWN_RATE;

	rcg->current_freq = freq;
	rcg->c.rate = freq->freq_hz;

	return HANDOFF_ENABLED_CLK;
}
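
/*
 * Sign-extension example for the handoff compare above (a sketch, assuming
 * the F() helper's encoding): with m = 1, n = 3, the table stores
 * n_val = ~(3 - 1) = 0xFFFFFFFD, while an 8-bit N register reads back as
 * 0xFD. Since 0xFD >> 8 == 0, the code ORs in BM(31, 8), recovering
 * 0xFFFFFFFD for the comparison against the table.
 */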

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c), 1);
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c), 0);
}

#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	const char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;

		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
						|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;
			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~CBCR_BRANCH_CDIV_MASK;
	regval |= CBCR_BRANCH_CDIV_MASKED(rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}
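
/*
 * Note that for cdiv branches the "rate" argument is really a divider
 * setting rather than a frequency in Hz: branch_cdiv_set_rate(branch, 4),
 * for instance, programs 4 into CBCR bits [24:16], bounded by max_div.
 */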

static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(branch->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(branch->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	if (!branch->has_sibling)
		return clk_get_rate(branch->parent);

	return 0;
}

static struct clk *branch_clk_get_parent(struct clk *c)
{
	return to_branch_clk(c)->parent;
}

static int branch_clk_list_rate(struct clk *c, unsigned n)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (branch->parent)
		return rcg_clk_list_rate(branch->parent, n);
	else
		return 0;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->parent) {
		if (branch->parent->ops->handoff)
			return branch->parent->ops->handoff(branch->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(branch), action);
}
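
/*
 * Reset usage sketch, via the clk_reset() API from <mach/clk.h> (the delay
 * length is a hypothetical, hardware-specific choice):
 *
 *	clk_reset(clk, CLK_RESET_ASSERT);
 *	udelay(10);
 *	clk_reset(clk, CLK_RESET_DEASSERT);
 */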

/*
 * Voteable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
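
/*
 * No halt check on the disable path above: clearing this master's vote
 * does not guarantee the branch actually turns off, since other voters
 * may still be holding it on (an inference from the voting scheme, not
 * from documentation).
 */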

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.get_parent = rcg_clk_get_parent,
	.handoff = rcg_clk_handoff,
};

struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.get_parent = rcg_clk_get_parent,
	.handoff = rcg_mnd_clk_handoff,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.get_parent = branch_clk_get_parent,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};
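
/*
 * Sketch of how a clock might be wired up to these ops (field names follow
 * struct rcg_clk as used in this file; the register offset, frequency
 * table, and base pointer are hypothetical):
 *
 *	static struct rcg_clk gp1_clk_src = {
 *		.cmd_rcgr_reg = 0x1904,
 *		.set_rate = set_rate_mnd,
 *		.freq_tbl = ftbl_gp_clk,
 *		.current_freq = &rcg_dummy_freq,
 *		.base = &virt_base,
 *		.c = {
 *			.dbg_name = "gp1_clk_src",
 *			.ops = &clk_ops_rcg_mnd,
 *		},
 *	};
 */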