/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include "clock.h"

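/*
 * Voltage voting: a clock may point at a clk_vdd_class and carry a table of
 * maximum frequencies (fmax[]) per voltage level. Clients vote for the level
 * their rate requires, and the highest outstanding vote determines the level
 * requested through the class's set_vdd() callback.
 */
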
/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

/*
 * Standard clock functions defined in include/linux/clk.h
 */
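/*
 * A typical consumer sequence, as a sketch ("core_clk" is a hypothetical
 * clock name, not one defined in this file):
 *
 *	struct clk *c = clk_get(dev, "core_clk");
 *	if (!IS_ERR(c)) {
 *		clk_set_rate(c, 19200000);
 *		clk_enable(c);
 *		...
 *		clk_disable(c);
 *		clk_put(c);
 *	}
 */

/*
 * Enable a clock and take a reference on it. On the first enable, the parent
 * and any "depends" clock are enabled first, a voltage vote is cast for the
 * clock's current rate, and then the clock's own enable op runs. If the clock
 * was left running by handoff code, the first client enable simply adopts
 * that reference and clears CLKFLAG_HANDOFF_RATE.
 */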
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags, rate;
	struct clk *parent;

	if (!clk)
		return 0;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count == 0) {
		parent = clk_get_parent(clk);
		rate = clk_get_rate(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		ret = vote_rate_vdd(clk, rate);
		if (ret)
			goto err_vote_vdd;
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock if
		 * a client driver still has it enabled.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

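/*
 * Drop a reference to a clock. When the last reference is released the
 * clock's disable op runs, the voltage vote for its current rate is removed,
 * and the "depends" and parent clocks are released in turn.
 */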
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);
		unsigned long rate = clk_get_rate(clk);

		if (clk->ops->disable)
			clk->ops->disable(clk);
		unvote_rate_vdd(clk, rate);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

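/* Assert or deassert a clock's reset, if the clock provides a reset op. */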
int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

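/* Return the clock's current rate in Hz, or 0 if the rate is not known. */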
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk->ops->get_rate)
		return 0;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

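/*
 * Set a clock's rate. If the clock is enabled, the voltage level needed by
 * the new rate is voted for before switching, and the vote for the old rate
 * is released only after the switch succeeds, so the supply never drops
 * below what the running clock requires.
 */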
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate, flags;
	int rc;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count) {
		start_rate = clk_get_rate(clk);
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = clk->ops->set_rate(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = clk->ops->set_rate(clk, rate);
	}
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;
}
EXPORT_SYMBOL(clk_set_rate);

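/* Return the rate the clock would actually run at for the requested rate. */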
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

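/* Set a lower bound on the clock's rate, if the clock supports one. */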
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->set_min_rate)
		return -ENOSYS;

	return clk->ops->set_min_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_min_rate);

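/* Set an upper bound on the clock's rate, if the clock supports one. */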
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

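/* Switch the clock to a new parent, if the clock supports reparenting. */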
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

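/* Return the clock's current parent, or NULL if it does not report one. */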
struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

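/* Pass implementation-specific flags down to the clock, if supported. */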
int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

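/*
 * Register the board's clock table. Any clock whose handoff op reports it is
 * already running is marked with CLKFLAG_HANDOFF_RATE and given a reference
 * so it stays enabled until a client claims it or clock_late_init() runs.
 */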
void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->init)
		clk_init_data->init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);
		clk_set_parent(clk, parent);
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk)) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);
}

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			spin_lock_irqsave(&clk->lock, flags);
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
			if (clk->flags & CLKFLAG_HANDOFF_RATE) {
				clk->flags &= ~CLKFLAG_HANDOFF_RATE;
				handoff = true;
			}
			spin_unlock_irqrestore(&clk->lock, flags);
			/*
			 * Calling clk_disable() outside the lock is safe since
			 * it doesn't need to be atomic with the flag change.
			 */
			if (handoff)
				clk_disable(clk);
		}
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);