/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include "clock.h"
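
/*
 * Voltage (vdd) voting, as implemented below: a clock may point at a
 * clk_vdd_class, which keeps one vote count per supported voltage level.
 * A clock votes for the lowest level whose fmax is at or above its rate,
 * and the class is programmed to the highest level that still has
 * outstanding votes, so the rail never drops below what any enabled
 * clock requires.
 */
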
/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}
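
/*
 * Example with illustrative values only: if fmax[] were { 100 MHz,
 * 200 MHz, 400 MHz } for levels 0..2, a rate of 150 MHz would map to
 * level 1, 400 MHz to level 2, and 500 MHz would fail with -EINVAL.
 */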

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}
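
/*
 * Note that update_vdd() scans from the highest level down and stops at
 * the first level with outstanding votes, so the class always satisfies
 * its most demanding client; level 0 is the implicit floor when no votes
 * remain.
 */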

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}
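
/*
 * Votes and unvotes must be balanced per level, as the WARN() above
 * enforces. An illustrative pairing (names are hypothetical):
 *
 *	rc = vote_vdd_level(&my_vdd_class, MY_HIGH_LEVEL);
 *	...
 *	unvote_vdd_level(&my_vdd_class, MY_HIGH_LEVEL);
 */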

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return 0;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock if
		 * a client driver still has it enabled.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);
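
/*
 * Typical client usage, sketched with hypothetical names:
 *
 *	struct clk *c = clk_get(&pdev->dev, "my_clk");
 *	ret = clk_enable(c);
 *	...
 *	clk_disable(c);
 *
 * Calls nest: only the first clk_enable() programs the hardware (after
 * recursively enabling the parent and any dependency), and only the
 * matching final clk_disable() turns the clock back off.
 */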

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);

		if (clk->ops->disable)
			clk->ops->disable(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk->ops->get_rate)
		return 0;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

static int _clk_set_rate(struct clk *clk, unsigned long rate,
			 int (*set_fn)(struct clk *, unsigned))
{
	unsigned long start_rate, flags;
	int rc;

	if (!set_fn)
		return -ENOSYS;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count) {
		start_rate = clk->rate;
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = set_fn(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = set_fn(clk, rate);
	}

	if (!rc)
		clk->rate = rate;

	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;
}
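
/*
 * For an enabled clock, the vote/set/unvote ordering in _clk_set_rate()
 * keeps the rail voted high enough for both the old and the new frequency
 * across the switch, so there is no window in which the clock runs faster
 * than the current voltage supports.
 */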

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return _clk_set_rate(clk, rate, clk->ops->set_rate);
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return _clk_set_rate(clk, rate, clk->ops->set_min_rate);
}
EXPORT_SYMBOL(clk_set_min_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->init)
		clk_init_data->init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);
		clk_set_parent(clk, parent);
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk)) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);
}
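
/*
 * Board files hand their clock tables to msm_clock_init() at machine init
 * time. An illustrative sketch (names are hypothetical; real tables live
 * in the per-SoC clock files):
 *
 *	static struct clock_init_data my_clock_init_data __initdata = {
 *		.table = my_clk_lookup_table,
 *		.size = ARRAY_SIZE(my_clk_lookup_table),
 *	};
 *
 *	msm_clock_init(&my_clock_init_data);
 */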

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			spin_lock_irqsave(&clk->lock, flags);
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
			if (clk->flags & CLKFLAG_HANDOFF_RATE) {
				clk->flags &= ~CLKFLAG_HANDOFF_RATE;
				handoff = true;
			}
			spin_unlock_irqrestore(&clk->lock, flags);
			/*
			 * Calling clk_disable() outside the lock is safe since
			 * it doesn't need to be atomic with the flag change.
			 */
			if (handoff)
				clk_disable(clk);
		}
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);