/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include "clock.h"

/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}
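
/*
 * Illustrative example (hypothetical values, not part of this driver):
 * with clk->fmax = { 100000000, 200000000, 400000000 }, a rate of
 * 150000000 maps to level 1 and 400000000 to level 2, while any rate
 * above 400000000 fails with -EINVAL.
 */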

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}
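
/*
 * Worked example (illustrative): with level_votes = { 3, 0, 1 }, the
 * highest level holding a vote is 2, so set_vdd() is asked for level 2
 * regardless of how many votes exist below it. If no level above 0
 * holds a vote, the class falls back to level 0.
 */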

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}
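
/*
 * Usage sketch (hypothetical caller; my_vdd_class and MY_HIGH_LEVEL are
 * placeholders): votes are reference-counted per level and must be
 * balanced, e.g.
 *
 *	rc = vote_vdd_level(&my_vdd_class, MY_HIGH_LEVEL);
 *	if (!rc) {
 *		...work that needs the raised voltage...
 *		unvote_vdd_level(&my_vdd_class, MY_HIGH_LEVEL);
 *	}
 */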

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

int clk_prepare(struct clk *clk)
{
	int ret = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	mutex_lock(&clk->prepare_lock);
	if (clk->prepare_count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_prepare(parent);
		if (ret)
			goto out;
		ret = clk_prepare(clk->depends);
		if (ret)
			goto err_prepare_depends;

		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk);
		if (ret)
			goto err_prepare_clock;
	}
	clk->prepare_count++;
out:
	mutex_unlock(&clk->prepare_lock);
	return ret;
err_prepare_clock:
	clk_unprepare(clk->depends);
err_prepare_depends:
	clk_unprepare(parent);
	goto out;
}
EXPORT_SYMBOL(clk_prepare);
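
/*
 * Usage sketch (hypothetical client; my_clk is a placeholder for a
 * clock obtained via clk_get()): prepare may sleep and must precede
 * enable, which may be called from atomic context:
 *
 *	ret = clk_prepare(my_clk);        (process context only)
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(my_clk);         (atomic context allowed)
 *	if (ret)
 *		clk_unprepare(my_clk);
 */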

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return 0;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(!clk->warned && !clk->prepare_count,
			"%s: Don't call enable on unprepared clocks\n",
			clk->dbg_name))
		clk->warned = true;
	if (clk->count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock if
		 * a client driver still has it enabled.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(!clk->warned && !clk->prepare_count,
			"%s: Never called prepare or calling disable "
			"after unprepare\n",
			clk->dbg_name))
		clk->warned = true;
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);

		if (clk->ops->disable)
			clk->ops->disable(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

void clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	mutex_lock(&clk->prepare_lock);
	if (!clk->prepare_count) {
		if (WARN(!clk->warned, "%s is unbalanced (prepare)",
				clk->dbg_name))
			clk->warned = true;
		goto out;
	}
	if (clk->prepare_count == 1) {
		struct clk *parent = clk_get_parent(clk);

		if (WARN(!clk->warned && clk->count,
			"%s: Don't call unprepare when the clock is enabled\n",
			clk->dbg_name))
			clk->warned = true;

		if (clk->ops->unprepare)
			clk->ops->unprepare(clk);
		clk_unprepare(clk->depends);
		clk_unprepare(parent);
	}
	clk->prepare_count--;
out:
	mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);
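
/*
 * Teardown sketch (hypothetical, mirrors the enable example above):
 * disable before unpreparing, never the other way around:
 *
 *	clk_disable(my_clk);              (atomic context allowed)
 *	clk_unprepare(my_clk);            (process context only)
 *
 * From sleepable context the clk_disable_unprepare() helper does both.
 */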

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk->ops->get_rate)
		return clk->rate;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate, flags;
	int rc;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count) {
		start_rate = clk->rate;
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = clk->ops->set_rate(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = clk->ops->set_rate(clk, rate);
	}

	if (!rc)
		clk->rate = rate;

	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;
}
EXPORT_SYMBOL(clk_set_rate);
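
/*
 * Rate-change flow (illustrative): for an enabled clock, the vdd vote
 * for the new rate is taken before the switch and the vote for the old
 * rate is dropped after, so the voltage never dips below what either
 * rate requires during the transition, e.g.
 *
 *	clk_set_rate(my_clk, 200000000);  (my_clk is a placeholder)
 */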

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->init)
		clk_init_data->init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);

		clk_set_parent(clk, parent);
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk)) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_prepare_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);
}
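
/*
 * Registration sketch (hypothetical board code; my_clock_tbl and
 * my_clock_init_data are placeholders):
 *
 *	static struct clock_init_data my_clock_init_data __initdata = {
 *		.table = my_clock_tbl,
 *		.size = ARRAY_SIZE(my_clock_tbl),
 *	};
 *
 *	msm_clock_init(&my_clock_init_data);
 *
 * Real tables are defined in the per-target clock source files.
 */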

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			spin_lock_irqsave(&clk->lock, flags);
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
			if (clk->flags & CLKFLAG_HANDOFF_RATE) {
				clk->flags &= ~CLKFLAG_HANDOFF_RATE;
				handoff = true;
			}
			spin_unlock_irqrestore(&clk->lock, flags);
			/*
			 * Calling this outside the lock is safe since
			 * it doesn't need to be atomic with the flag change.
			 */
			if (handoff)
				clk_disable_unprepare(clk);
		}
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);