/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <trace/events/power.h>

#include "clock.h"

/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
	int level;

	for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
		if (rate <= clk->fmax[level])
			break;

	if (level == ARRAY_SIZE(clk->fmax)) {
		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
			clk->dbg_name);
		return -EINVAL;
	}

	return level;
}
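
/*
 * Worked example (illustrative only; the numbers are hypothetical, not from
 * any real clock): with fmax[] = { 0, 96000000, 200000000 }, a requested
 * rate of 64 MHz maps to level 1 and 150 MHz to level 2, while 250 MHz
 * exceeds the highest Fmax and find_vdd_level() returns -EINVAL.
 */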

/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
	int level, rc;

	for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
		if (vdd_class->level_votes[level])
			break;

	if (level == vdd_class->cur_level)
		return 0;

	rc = vdd_class->set_vdd(vdd_class, level);
	if (!rc)
		vdd_class->cur_level = level;

	return rc;
}

/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&vdd_class->lock, flags);
	vdd_class->level_votes[level]++;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]--;
	spin_unlock_irqrestore(&vdd_class->lock, flags);

	return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&vdd_class->lock, flags);
	if (WARN(!vdd_class->level_votes[level],
			"Reference counts are incorrect for %s level %d\n",
			vdd_class->class_name, level))
		goto out;
	vdd_class->level_votes[level]--;
	rc = update_vdd(vdd_class);
	if (rc)
		vdd_class->level_votes[level]++;
out:
	spin_unlock_irqrestore(&vdd_class->lock, flags);
	return rc;
}
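
/*
 * Example usage (illustrative sketch; "my_vdd_class" and the level index are
 * hypothetical): each successful vote_vdd_level() call must eventually be
 * balanced by an unvote_vdd_level() at the same level, or the reference
 * counts above will trip the WARN.
 *
 *	rc = vote_vdd_level(&my_vdd_class, 2);
 *	if (rc)
 *		return rc;
 *	... run at the frequency that needs this voltage ...
 *	unvote_vdd_level(&my_vdd_class, 2);
 */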

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return 0;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return level;

	return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
	int level;

	if (!clk->vdd_class)
		return;

	level = find_vdd_level(clk, rate);
	if (level < 0)
		return;

	unvote_vdd_level(clk->vdd_class, level);
}

int clk_prepare(struct clk *clk)
{
	int ret = 0;
	struct clk *parent;

	if (!clk)
		return 0;
	if (IS_ERR(clk))
		return -EINVAL;

	mutex_lock(&clk->prepare_lock);
	if (clk->prepare_count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_prepare(parent);
		if (ret)
			goto out;
		ret = clk_prepare(clk->depends);
		if (ret)
			goto err_prepare_depends;

		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk);
		if (ret)
			goto err_prepare_clock;
	}
	clk->prepare_count++;
out:
	mutex_unlock(&clk->prepare_lock);
	return ret;
err_prepare_clock:
	clk_unprepare(clk->depends);
err_prepare_depends:
	clk_unprepare(parent);
	goto out;
}
EXPORT_SYMBOL(clk_prepare);
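
/*
 * Example usage (illustrative sketch; "my_clk" is hypothetical): clk_prepare()
 * takes a mutex and so may sleep, while clk_enable() below only takes a
 * spinlock and may be called from atomic context once the clock is prepared.
 *
 *	ret = clk_prepare(my_clk);	/- process context, may sleep -/
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(my_clk);	/- atomic context is fine here -/
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 */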

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return 0;
	if (IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(!clk->warned && !clk->prepare_count,
			"%s: Don't call enable on unprepared clocks\n",
			clk->dbg_name))
		clk->warned = true;
	if (clk->count == 0) {
		parent = clk_get_parent(clk);

		ret = clk_enable(parent);
		if (ret)
			goto err_enable_parent;
		ret = clk_enable(clk->depends);
		if (ret)
			goto err_enable_depends;

		ret = vote_rate_vdd(clk, clk->rate);
		if (ret)
			goto err_vote_vdd;
		trace_clock_enable(clk->dbg_name, 1, smp_processor_id());
		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret)
			goto err_enable_clock;
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock
		 * if a client driver still has it enabled.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return 0;

err_enable_clock:
	unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
	clk_disable(clk->depends);
err_enable_depends:
	clk_disable(parent);
err_enable_parent:
	spin_unlock_irqrestore(&clk->lock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(!clk->warned && !clk->prepare_count,
			"%s: Never called prepare or calling disable "
			"after unprepare\n",
			clk->dbg_name))
		clk->warned = true;
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		struct clk *parent = clk_get_parent(clk);

		trace_clock_disable(clk->dbg_name, 0, smp_processor_id());
		if (clk->ops->disable)
			clk->ops->disable(clk);
		unvote_rate_vdd(clk, clk->rate);
		clk_disable(clk->depends);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	mutex_lock(&clk->prepare_lock);
	if (!clk->prepare_count) {
		if (WARN(!clk->warned, "%s is unbalanced (prepare)",
				clk->dbg_name))
			clk->warned = true;
		goto out;
	}
	if (clk->prepare_count == 1) {
		struct clk *parent = clk_get_parent(clk);

		if (WARN(!clk->warned && clk->count,
			"%s: Don't call unprepare when the clock is enabled\n",
				clk->dbg_name))
			clk->warned = true;

		if (clk->ops->unprepare)
			clk->ops->unprepare(clk);
		clk_unprepare(clk->depends);
		clk_unprepare(parent);
	}
	clk->prepare_count--;
out:
	mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->reset)
		return -ENOSYS;

	return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);
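
/*
 * Example usage (illustrative sketch; "my_clk" and the delay are
 * hypothetical): pulsing a block reset, assuming the platform's
 * enum clk_reset_action provides CLK_RESET_ASSERT and CLK_RESET_DEASSERT.
 *
 *	clk_reset(my_clk, CLK_RESET_ASSERT);
 *	udelay(10);
 *	clk_reset(my_clk, CLK_RESET_DEASSERT);
 */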

unsigned long clk_get_rate(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return 0;

	if (!clk->ops->get_rate)
		return clk->rate;

	return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long start_rate, flags;
	int rc = 0;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->set_rate)
		return -ENOSYS;

	spin_lock_irqsave(&clk->lock, flags);

	/* Return early if the rate isn't going to change */
	if (clk->rate == rate)
		goto out;

	trace_clock_set_rate(clk->dbg_name, rate, smp_processor_id());
	if (clk->count) {
		start_rate = clk->rate;
		/* Enforce vdd requirements for target frequency. */
		rc = vote_rate_vdd(clk, rate);
		if (rc)
			goto err_vote_vdd;
		rc = clk->ops->set_rate(clk, rate);
		if (rc)
			goto err_set_rate;
		/* Release vdd requirements for starting frequency. */
		unvote_rate_vdd(clk, start_rate);
	} else {
		rc = clk->ops->set_rate(clk, rate);
	}

	if (!rc)
		clk->rate = rate;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;

err_set_rate:
	unvote_rate_vdd(clk, rate);
err_vote_vdd:
	spin_unlock_irqrestore(&clk->lock, flags);
	return rc;
}
EXPORT_SYMBOL(clk_set_rate);
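
/*
 * Example usage (illustrative sketch; "my_clk" and the 19.2 MHz request are
 * hypothetical): clamp a request to a rate the clock can actually achieve
 * before programming it, since clk_round_rate() reports what the hardware
 * supports without changing anything.
 *
 *	long rounded = clk_round_rate(my_clk, 19200000);
 *	if (rounded > 0)
 *		rc = clk_set_rate(my_clk, rounded);
 */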

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->round_rate)
		return -ENOSYS;

	return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	if (!clk->ops->set_max_rate)
		return -ENOSYS;

	return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);
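
/*
 * Example usage (illustrative sketch; the clock name and cap are
 * hypothetical): a client capping a scalable clock at 200 MHz, for clocks
 * whose ops implement set_max_rate.
 *
 *	rc = clk_set_max_rate(my_scalable_clk, 200000000);
 */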

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;
	if (!clk->ops->set_parent)
		return 0;

	return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	if (!clk->ops->get_parent)
		return NULL;

	return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;
	if (!clk->ops->set_flags)
		return -ENOSYS;

	return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;
	struct clk *clk;

	clk_init_data = data;
	if (clk_init_data->pre_init)
		clk_init_data->pre_init();

	clock_tbl = data->table;
	num_clocks = data->size;

	/* Link each clock onto its parent's list of children. */
	for (n = 0; n < num_clocks; n++) {
		struct clk *parent;
		clk = clock_tbl[n].clk;
		parent = clk_get_parent(clk);
		if (parent && list_empty(&clk->siblings))
			list_add(&clk->siblings, &parent->children);
	}

	/*
	 * Detect and preserve initial clock state until clock_late_init() or
	 * a driver explicitly changes it, whichever is first.
	 */
	for (n = 0; n < num_clocks; n++) {
		clk = clock_tbl[n].clk;
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE) &&
		    (clk->ops->handoff(clk) == HANDOFF_ENABLED_CLK)) {
			clk->flags |= CLKFLAG_HANDOFF_RATE;
			clk_prepare_enable(clk);
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);

	if (clk_init_data->post_init)
		clk_init_data->post_init();
}
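
/*
 * Example (illustrative sketch): a board file handing its clock table to
 * msm_clock_init(). The table contents, ".c" embedded-clk member, and
 * callback name are hypothetical; the exact clk_lookup initializers depend
 * on the platform's clock definitions.
 *
 *	static struct clk_lookup msm_clocks_board[] = {
 *		{ .con_id = "core_clk", .dev_id = "msm_serial.0",
 *		  .clk = &uart1_clk.c },
 *	};
 *
 *	struct clock_init_data board_clock_init_data = {
 *		.table = msm_clocks_board,
 *		.size = ARRAY_SIZE(msm_clocks_board),
 *		.pre_init = board_clock_pre_init,
 *	};
 *
 *	msm_clock_init(&board_clock_init_data);
 */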

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		spin_lock_irqsave(&clk->lock, flags);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
		}
		if (clk->flags & CLKFLAG_HANDOFF_RATE) {
			clk->flags &= ~CLKFLAG_HANDOFF_RATE;
			handoff = true;
		}
		spin_unlock_irqrestore(&clk->lock, flags);
		/*
		 * Calling this outside the lock is safe since
		 * it doesn't need to be atomic with the flag change.
		 */
		if (handoff)
			clk_disable_unprepare(clk);
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);