/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/clk.h>
#include <mach/clk-provider.h>

#include "clock-local2.h"

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

/*
 * When updating an RCG configuration, check the update bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define UPDATE_CHECK_MAX_LOOPS	200

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

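/*
 * Register accessor helpers: an RCG's CFG, M, N and D registers sit at fixed
 * offsets (+0x4, +0x8, +0xC, +0x10) from its CMD_RCGR register, as encoded
 * below.
 */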
#define CMD_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg)
#define CFG_RCGR_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define D_REG(x)		(*(x)->base + (x)->cmd_rcgr_reg + 0x10)
#define CBCR_REG(x)		(*(x)->base + (x)->cbcr_reg)
#define BCR_REG(x)		(*(x)->base + (x)->bcr_reg)
#define VOTE_REG(x)		(*(x)->base + (x)->vote_reg)

/*
 * Important clock bit positions and masks
 */
#define CMD_RCGR_ROOT_ENABLE_BIT	BIT(1)
#define CBCR_BRANCH_ENABLE_BIT		BIT(0)
#define CBCR_BRANCH_OFF_BIT		BIT(31)
#define CMD_RCGR_CONFIG_UPDATE_BIT	BIT(0)
#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
#define BCR_BLK_ARES_BIT		BIT(0)
#define CBCR_HW_CTL_BIT			BIT(1)
#define CFG_RCGR_DIV_MASK		BM(4, 0)
#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
#define MND_MODE_MASK			BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
#define CMD_RCGR_CONFIG_DIRTY_MASK	BM(7, 4)
#define CBCR_BRANCH_CDIV_MASK		BM(24, 16)
#define CBCR_BRANCH_CDIV_MASKED(val)	BVAL(24, 16, (val))

enum branch_state {
	BRANCH_ON,
	BRANCH_OFF,
};

/*
 * RCG functions
 */

/*
 * Update an RCG with a new configuration. This may include a new M, N, or D
 * value, source selection or pre-divider value.
 */
static void rcg_update_config(struct rcg_clk *rcg)
{
	u32 cmd_rcgr_regval, count;

	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return;
		udelay(1);
	}

	WARN(count == 0, "%s: rcg didn't update its configuration.",
		rcg->c.dbg_name);
}

/* RCG set rate function for clocks with Half Integer Dividers. */
void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/* RCG set rate function for clocks with MND & Half Integer Dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	u32 cfg_regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	writel_relaxed(nf->m_val, M_REG(rcg));
	writel_relaxed(nf->n_val, N_REG(rcg));
	writel_relaxed(nf->d_val, D_REG(rcg));

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
	cfg_regval |= nf->div_src_val;

	/* Activate or disable the M/N:D divider as necessary */
	cfg_regval &= ~MND_MODE_MASK;
	if (nf->n_val != 0)
		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));

	rcg_update_config(rcg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

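/*
 * An RCG must have a real rate programmed before it is prepared; warn if a
 * client prepares it while it still points at the dummy frequency.
 */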
static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	return 0;
}

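/*
 * Switch an RCG to a new frequency: look the rate up in the clock's frequency
 * table, take prepare/enable references on the new source clock (mirroring
 * the references currently held on this clock), program the new
 * configuration, then drop the references held on the old source.
 */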
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct clk_freq_tbl *cf, *nf;
	struct rcg_clk *rcg = to_rcg_clk(c);
	unsigned long flags;
	int rc = 0;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new freq. */
	if (c->prepare_count) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (c->count) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	BUG_ON(!rcg->set_rate);

	/* Perform clock-specific frequency switch operations. */
	rcg->set_rate(rcg, nf);

	/* Release source requirements of the old freq. */
	if (c->count)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (c->prepare_count)
		clk_unprepare(cf->src_clk);

	rcg->current_freq = nf;

	return 0;
}

/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return to_rcg_clk(c)->current_freq->src_clk;
}

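/*
 * Take over an RCG that may already be running at boot: read the source,
 * divider and (optionally) M/N:D configuration back from hardware and match
 * it against the frequency table to recover the current rate.
 */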
static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is the root enabled? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
		return HANDOFF_DISABLED_CLK;

	/* Is there a pending configuration? */
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return HANDOFF_UNKNOWN_RATE;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
			| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return HANDOFF_UNKNOWN_RATE;

	rcg->current_freq = freq;
	rcg->c.rate = freq->freq_hz;

	return HANDOFF_ENABLED_CLK;
}

static enum handoff rcg_mnd_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c), 1);
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	return _rcg_clk_handoff(to_rcg_clk(c), 0);
}

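/*
 * CBCR bits [31:28] report the branch state: 0x0 when the branch is running,
 * 0x8 when it is off, and 0x2 when it is kept on by a NoC FSM.
 */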
#define BRANCH_CHECK_MASK	BM(31, 28)
#define BRANCH_ON_VAL		BVAL(31, 28, 0x0)
#define BRANCH_OFF_VAL		BVAL(31, 28, 0x8)
#define BRANCH_NOC_FSM_ON_VAL	BVAL(31, 28, 0x2)

/*
 * Branch clock functions
 */
static void branch_clk_halt_check(u32 halt_check, const char *clk_name,
				void __iomem *cbcr_reg,
				enum branch_state br_status)
{
	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers. It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	if (halt_check == DELAY || halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (halt_check == HALT) {
		int count;
		u32 val;
		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
			val = readl_relaxed(cbcr_reg);
			val &= BRANCH_CHECK_MASK;
			switch (br_status) {
			case BRANCH_ON:
				if (val == BRANCH_ON_VAL
					|| val == BRANCH_NOC_FSM_ON_VAL)
					return;
				break;

			case BRANCH_OFF:
				if (val == BRANCH_OFF_VAL)
					return;
				break;
			}
			udelay(1);
		}
		WARN(count == 0, "%s status stuck %s", clk_name, status_str);
	}
}

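/*
 * Set the branch's enable bit, then poll until the hardware reports the
 * branch on. The disable path below is symmetric.
 */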
static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 cbcr_val;
	struct branch_clk *branch = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	cbcr_val = readl_relaxed(CBCR_REG(branch));
	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(cbcr_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to enable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_ON);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *branch = to_branch_clk(c);
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(CBCR_REG(branch));
	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
	writel_relaxed(reg_val, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Wait for clock to disable before continuing. */
	branch_clk_halt_check(branch->halt_check, branch->c.dbg_name,
				CBCR_REG(branch), BRANCH_OFF);
}

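/*
 * For branches with a local divider, "rate" is the divider value itself;
 * program it into the CDIV field (CBCR bits [24:16]).
 */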
static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
{
	unsigned long flags;
	u32 regval;

	if (rate > branch->max_div)
		return -EINVAL;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(CBCR_REG(branch));
	regval &= ~CBCR_BRANCH_CDIV_MASK;
	regval |= CBCR_BRANCH_CDIV_MASKED(rate);
	writel_relaxed(regval, CBCR_REG(branch));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static int branch_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch_cdiv_set_rate(branch, rate);

	if (!branch->has_sibling)
		return clk_set_rate(branch->parent, rate);

	return -EPERM;
}

static long branch_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return rate <= (branch->max_div) ? rate : -EPERM;

	if (!branch->has_sibling)
		return clk_round_rate(branch->parent, rate);

	return -EPERM;
}

static unsigned long branch_clk_get_rate(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->max_div)
		return branch->c.rate;

	if (!branch->has_sibling)
		return clk_get_rate(branch->parent);

	return 0;
}

static struct clk *branch_clk_get_parent(struct clk *c)
{
	return to_branch_clk(c)->parent;
}

static int branch_clk_list_rate(struct clk *c, unsigned n)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (branch->has_sibling == 1)
		return -ENXIO;

	if (branch->parent)
		return rcg_clk_list_rate(branch->parent, n);
	else
		return 0;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *branch = to_branch_clk(c);
	u32 cbcr_regval;

	cbcr_regval = readl_relaxed(CBCR_REG(branch));
	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
		return HANDOFF_DISABLED_CLK;

	if (branch->parent) {
		if (branch->parent->ops->handoff)
			return branch->parent->ops->handoff(branch->parent);
	}

	return HANDOFF_ENABLED_CLK;
}

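/* Assert or deassert a block reset by toggling the ARES bit in the BCR. */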
static int __branch_clk_reset(void __iomem *bcr_reg,
				enum clk_reset_action action)
{
	int ret = 0;
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(bcr_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= BCR_BLK_ARES_BIT;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~BCR_BLK_ARES_BIT;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, bcr_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Make sure write is issued before returning. */
	mb();

	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct branch_clk *branch = to_branch_clk(c);

	if (!branch->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(branch), action);
}

/*
 * Voteable clock functions
 */
static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	if (!vclk->bcr_reg) {
		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
			c->dbg_name);
		return -EPERM;
	}
	return __branch_clk_reset(BCR_REG(vclk), action);
}

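/*
 * Vote for a shared clock by setting this master's enable bit in the vote
 * register; the CBCR is still polled to confirm the branch actually turned
 * on. Disable below only clears the vote and does not poll, since other
 * voters may legitimately keep the clock running.
 */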
static int local_vote_clk_enable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena |= vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	branch_clk_halt_check(vclk->halt_check, c->dbg_name, CBCR_REG(vclk),
				BRANCH_ON);

	return 0;
}

static void local_vote_clk_disable(struct clk *c)
{
	unsigned long flags;
	u32 ena;
	struct local_vote_clk *vclk = to_local_vote_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	ena = readl_relaxed(VOTE_REG(vclk));
	ena &= ~vclk->en_mask;
	writel_relaxed(ena, VOTE_REG(vclk));
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static enum handoff local_vote_clk_handoff(struct clk *c)
{
	struct local_vote_clk *vclk = to_local_vote_clk(c);
	u32 vote_regval;

	/* Is the branch voted on by apps? */
	vote_regval = readl_relaxed(VOTE_REG(vclk));
	if (!(vote_regval & vclk->en_mask))
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_empty;

struct clk_ops clk_ops_rcg = {
	.prepare = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.get_parent = rcg_clk_get_parent,
	.handoff = rcg_clk_handoff,
};

struct clk_ops clk_ops_rcg_mnd = {
	.prepare = rcg_clk_prepare,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.get_parent = rcg_clk_get_parent,
	.handoff = rcg_mnd_clk_handoff,
};

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.get_parent = branch_clk_get_parent,
	.handoff = branch_clk_handoff,
};

struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
};
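
/*
 * Usage sketch (illustrative only; the clock name, register offset and table
 * values below are hypothetical, and real frequency tables are normally
 * generated with the board file's F() helper macros rather than written out
 * by hand):
 *
 *	static struct clk_freq_tbl ftbl_foo[] = {
 *		{ .freq_hz = 19200000, .src_clk = &xo.c, .div_src_val = ... },
 *		{ .freq_hz = FREQ_END },
 *	};
 *
 *	static struct rcg_clk foo_src = {
 *		.cmd_rcgr_reg = 0x1000,
 *		.set_rate = set_rate_hid,
 *		.freq_tbl = ftbl_foo,
 *		.current_freq = &rcg_dummy_freq,
 *		.base = &virt_base,
 *		.c = {
 *			.dbg_name = "foo_src",
 *			.ops = &clk_ops_rcg,
 *		},
 *	};
 *
 * Starting from rcg_dummy_freq makes rcg_clk_prepare() warn if a client
 * prepares the clock before calling clk_set_rate().
 */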