/* arch/arm/mach-msm/clock.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include "clock.h"

/* Find the voltage level required for a given rate. */
static int find_vdd_level(struct clk *clk, unsigned long rate)
{
        int level;

        for (level = 0; level < ARRAY_SIZE(clk->fmax); level++)
                if (rate <= clk->fmax[level])
                        break;

        if (level == ARRAY_SIZE(clk->fmax)) {
                pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
                        clk->dbg_name);
                return -EINVAL;
        }

        return level;
}

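/*
 * Worked example (fmax values illustrative, not from any real SoC):
 * with clk->fmax = { 100 MHz, 200 MHz, 400 MHz }, a request for
 * 150 MHz maps to level 1, 400 MHz maps to level 2, and anything
 * above 400 MHz fails with -EINVAL.
 */
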
/* Update voltage level given the current votes. */
static int update_vdd(struct clk_vdd_class *vdd_class)
{
        int level, rc;

        for (level = ARRAY_SIZE(vdd_class->level_votes)-1; level > 0; level--)
                if (vdd_class->level_votes[level])
                        break;

        if (level == vdd_class->cur_level)
                return 0;

        rc = vdd_class->set_vdd(vdd_class, level);
        if (!rc)
                vdd_class->cur_level = level;

        return rc;
}

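/*
 * Voting example (counts illustrative): with level_votes = { 2, 0, 1 },
 * one client still requires the highest level, so update_vdd() asks for
 * level 2 even though the two other votes would be satisfied by level 0.
 * The highest level with a non-zero vote count always wins.
 */
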
/* Vote for a voltage level. */
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&vdd_class->lock, flags);
        vdd_class->level_votes[level]++;
        rc = update_vdd(vdd_class);
        if (rc)
                vdd_class->level_votes[level]--;
        spin_unlock_irqrestore(&vdd_class->lock, flags);

        return rc;
}

/* Remove vote for a voltage level. */
int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&vdd_class->lock, flags);
        if (WARN(!vdd_class->level_votes[level],
                        "Reference counts are incorrect for %s level %d\n",
                        vdd_class->class_name, level))
                goto out;
        vdd_class->level_votes[level]--;
        rc = update_vdd(vdd_class);
        if (rc)
                vdd_class->level_votes[level]++;
out:
        spin_unlock_irqrestore(&vdd_class->lock, flags);
        return rc;
}

/* Vote for a voltage level corresponding to a clock's rate. */
static int vote_rate_vdd(struct clk *clk, unsigned long rate)
{
        int level;

        if (!clk->vdd_class)
                return 0;

        level = find_vdd_level(clk, rate);
        if (level < 0)
                return level;

        return vote_vdd_level(clk->vdd_class, level);
}

/* Remove vote for a voltage level corresponding to a clock's rate. */
static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
{
        int level;

        if (!clk->vdd_class)
                return;

        level = find_vdd_level(clk, rate);
        if (level < 0)
                return;

        unvote_vdd_level(clk->vdd_class, level);
}

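/*
 * clk_prepare() must be called from sleepable context (it takes a
 * mutex) before clk_enable(). Preparing a clock for the first time
 * also prepares its parent and its "depends" clock, and unwinds that
 * work if the clock's own prepare op fails.
 */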
int clk_prepare(struct clk *clk)
{
        int ret = 0;
        struct clk *parent;

        if (!clk)
                return 0;

        mutex_lock(&clk->prepare_lock);
        if (clk->prepare_count == 0) {
                parent = clk_get_parent(clk);

                ret = clk_prepare(parent);
                if (ret)
                        goto out;
                ret = clk_prepare(clk->depends);
                if (ret)
                        goto err_prepare_depends;

                if (clk->ops->prepare)
                        ret = clk->ops->prepare(clk);
                if (ret)
                        goto err_prepare_clock;
        }
        clk->prepare_count++;
out:
        mutex_unlock(&clk->prepare_lock);
        return ret;
err_prepare_clock:
        clk_unprepare(clk->depends);
err_prepare_depends:
        clk_unprepare(parent);
        goto out;
}
EXPORT_SYMBOL(clk_prepare);

/*
 * Standard clock functions defined in include/linux/clk.h
 */
int clk_enable(struct clk *clk)
{
        int ret = 0;
        unsigned long flags;
        struct clk *parent;

        if (!clk)
                return 0;

        spin_lock_irqsave(&clk->lock, flags);
        if (WARN(!clk->warned && !clk->prepare_count,
                        "%s: Don't call enable on unprepared clocks\n",
                        clk->dbg_name))
                clk->warned = true;
        if (clk->count == 0) {
                parent = clk_get_parent(clk);

                ret = clk_enable(parent);
                if (ret)
                        goto err_enable_parent;
                ret = clk_enable(clk->depends);
                if (ret)
                        goto err_enable_depends;

                ret = vote_rate_vdd(clk, clk->rate);
                if (ret)
                        goto err_vote_vdd;
                if (clk->ops->enable)
                        ret = clk->ops->enable(clk);
                if (ret)
                        goto err_enable_clock;
        } else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
                /*
                 * The clock was already enabled by handoff code so there is no
                 * need to enable it again here. Clearing the handoff flag will
                 * prevent the lateinit handoff code from disabling the clock if
                 * a client driver still has it enabled.
                 */
                clk->flags &= ~CLKFLAG_HANDOFF_RATE;
                goto out;
        }
        clk->count++;
out:
        spin_unlock_irqrestore(&clk->lock, flags);

        return 0;

err_enable_clock:
        unvote_rate_vdd(clk, clk->rate);
err_vote_vdd:
        clk_disable(clk->depends);
err_enable_depends:
        clk_disable(parent);
err_enable_parent:
        spin_unlock_irqrestore(&clk->lock, flags);
        return ret;
}
EXPORT_SYMBOL(clk_enable);

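/*
 * Disabling the last user of a clock also drops the rate's voltage
 * vote and releases the parent and "depends" clocks, mirroring
 * clk_enable() in reverse order.
 */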
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clk->lock, flags);
        if (WARN(!clk->warned && !clk->prepare_count,
                        "%s: Never called prepare or calling disable after unprepare\n",
                        clk->dbg_name))
                clk->warned = true;
        if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
                goto out;
        if (clk->count == 1) {
                struct clk *parent = clk_get_parent(clk);

                if (clk->ops->disable)
                        clk->ops->disable(clk);
                unvote_rate_vdd(clk, clk->rate);
                clk_disable(clk->depends);
                clk_disable(parent);
        }
        clk->count--;
out:
        spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);

void clk_unprepare(struct clk *clk)
{
        if (!clk)
                return;

        mutex_lock(&clk->prepare_lock);
        if (!clk->prepare_count) {
                if (WARN(!clk->warned, "%s is unbalanced (prepare)",
                                clk->dbg_name))
                        clk->warned = true;
                goto out;
        }
        if (clk->prepare_count == 1) {
                struct clk *parent = clk_get_parent(clk);

                if (WARN(!clk->warned && clk->count,
                        "%s: Don't call unprepare when the clock is enabled\n",
                                clk->dbg_name))
                        clk->warned = true;

                if (clk->ops->unprepare)
                        clk->ops->unprepare(clk);
                clk_unprepare(clk->depends);
                clk_unprepare(parent);
        }
        clk->prepare_count--;
out:
        mutex_unlock(&clk->prepare_lock);
}
EXPORT_SYMBOL(clk_unprepare);

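/*
 * Typical client usage (a sketch; "my_clk" is a hypothetical clock
 * handle obtained from clk_get()):
 *
 *      ret = clk_prepare(my_clk);      (sleepable context only)
 *      ret = clk_enable(my_clk);       (atomic context is fine)
 *      ...
 *      clk_disable(my_clk);
 *      clk_unprepare(my_clk);
 *
 * The clk_prepare_enable()/clk_disable_unprepare() helpers combine
 * each pair when the caller can sleep.
 */
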
int clk_reset(struct clk *clk, enum clk_reset_action action)
{
        if (!clk->ops->reset)
                return -ENOSYS;

        return clk->ops->reset(clk, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
        if (!clk->ops->get_rate)
                return clk->rate;

        return clk->ops->get_rate(clk);
}
EXPORT_SYMBOL(clk_get_rate);

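/*
 * Rate changes on an enabled clock vote for the voltage needed by the
 * new rate before switching and release the old rate's vote only
 * afterwards, so the supply stays adequate for both rates throughout
 * the transition.
 */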
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long start_rate, flags;
        int rc;

        if (!clk->ops->set_rate)
                return -ENOSYS;

        spin_lock_irqsave(&clk->lock, flags);
        if (clk->count) {
                start_rate = clk->rate;
                /* Enforce vdd requirements for target frequency. */
                rc = vote_rate_vdd(clk, rate);
                if (rc)
                        goto err_vote_vdd;
                rc = clk->ops->set_rate(clk, rate);
                if (rc)
                        goto err_set_rate;
                /* Release vdd requirements for starting frequency. */
                unvote_rate_vdd(clk, start_rate);
        } else {
                rc = clk->ops->set_rate(clk, rate);
        }

        if (!rc)
                clk->rate = rate;

        spin_unlock_irqrestore(&clk->lock, flags);
        return rc;

err_set_rate:
        unvote_rate_vdd(clk, rate);
err_vote_vdd:
        spin_unlock_irqrestore(&clk->lock, flags);
        return rc;
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (!clk->ops->round_rate)
                return -ENOSYS;

        return clk->ops->round_rate(clk, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
        if (!clk->ops->set_max_rate)
                return -ENOSYS;

        return clk->ops->set_max_rate(clk, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        if (!clk->ops->set_parent)
                return 0;

        return clk->ops->set_parent(clk, parent);
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        if (!clk->ops->get_parent)
                return NULL;

        return clk->ops->get_parent(clk);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;
        if (!clk->ops->set_flags)
                return -ENOSYS;

        return clk->ops->set_flags(clk, flags);
}
EXPORT_SYMBOL(clk_set_flags);

static struct clock_init_data __initdata *clk_init_data;

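/*
 * Clocks found running at boot (e.g. left on by the bootloader) are
 * "handed off": msm_clock_init() marks them with CLKFLAG_HANDOFF_RATE
 * and takes an extra prepare/enable so they stay on until
 * clock_late_init() drops the extra reference for clocks no client
 * driver has claimed.
 */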
void __init msm_clock_init(struct clock_init_data *data)
{
        unsigned n;
        struct clk_lookup *clock_tbl;
        size_t num_clocks;

        clk_init_data = data;
        if (clk_init_data->init)
                clk_init_data->init();

        clock_tbl = data->table;
        num_clocks = data->size;

        for (n = 0; n < num_clocks; n++) {
                struct clk *clk = clock_tbl[n].clk;
                struct clk *parent = clk_get_parent(clk);
                clk_set_parent(clk, parent);
                if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
                        if (clk->ops->handoff(clk)) {
                                clk->flags |= CLKFLAG_HANDOFF_RATE;
                                clk_prepare_enable(clk);
                        }
                }
        }

        clkdev_add_table(clock_tbl, num_clocks);
}

/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
 */
static int __init clock_late_init(void)
{
        unsigned n, count = 0;
        unsigned long flags;
        int ret = 0;

        clock_debug_init(clk_init_data);
        for (n = 0; n < clk_init_data->size; n++) {
                struct clk *clk = clk_init_data->table[n].clk;
                bool handoff = false;

                clock_debug_add(clk);
                if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
                        spin_lock_irqsave(&clk->lock, flags);
                        if (!clk->count && clk->ops->auto_off) {
                                count++;
                                clk->ops->auto_off(clk);
                        }
                        if (clk->flags & CLKFLAG_HANDOFF_RATE) {
                                clk->flags &= ~CLKFLAG_HANDOFF_RATE;
                                handoff = true;
                        }
                        spin_unlock_irqrestore(&clk->lock, flags);
                        /*
                         * Calling this outside the lock is safe since
                         * it doesn't need to be atomic with the flag change.
                         */
                        if (handoff)
                                clk_disable_unprepare(clk);
                }
        }
        pr_info("clock_late_init() disabled %d unused clocks\n", count);
        if (clk_init_data->late_init)
                ret = clk_init_data->late_init();
        return ret;
}
late_initcall(clock_late_init);