Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 1 | /* arch/arm/mach-msm/clock.c |
| 2 | * |
| 3 | * Copyright (C) 2007 Google, Inc. |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 4 | * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved. |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 5 | * |
| 6 | * This software is licensed under the terms of the GNU General Public |
| 7 | * License version 2, as published by the Free Software Foundation, and |
| 8 | * may be copied, distributed, and modified under those terms. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | */ |
| 16 | |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 17 | #include <linux/kernel.h> |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 18 | #include <linux/err.h> |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 19 | #include <linux/spinlock.h> |
Stephen Boyd | bd32344 | 2011-02-23 09:37:42 -0800 | [diff] [blame] | 20 | #include <linux/string.h> |
| 21 | #include <linux/module.h> |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 22 | #include <linux/clk.h> |
Stephen Boyd | bd32344 | 2011-02-23 09:37:42 -0800 | [diff] [blame] | 23 | #include <linux/clkdev.h> |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 24 | |
| 25 | #include "clock.h" |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 26 | |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 27 | /* |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 28 | * Standard clock functions defined in include/linux/clk.h |
| 29 | */ |
/**
 * clk_enable() - Enable a clock, taking one reference on it.
 *
 * Returns 0 on success or the error from enabling the parent or this
 * clock's ops->enable(). A NULL clk is treated as a no-op success so
 * leaf code can enable optional clocks unconditionally.
 */
int clk_enable(struct clk *clk)
{
	int ret = 0;
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return 0;

	spin_lock_irqsave(&clk->lock, flags);
	if (clk->count == 0) {
		/*
		 * First reference: enable the parent before this clock so
		 * the upstream source is running when ops->enable() is
		 * called. The recursive clk_enable() takes the parent's own
		 * lock, not this one.
		 */
		parent = clk_get_parent(clk);
		ret = clk_enable(parent);
		if (ret)
			goto out;

		if (clk->ops->enable)
			ret = clk->ops->enable(clk);
		if (ret) {
			/* Undo the parent reference taken above. */
			clk_disable(parent);
			goto out;
		}
	} else if (clk->flags & CLKFLAG_HANDOFF_RATE) {
		/*
		 * The clock was already enabled by handoff code so there is no
		 * need to enable it again here. Clearing the handoff flag will
		 * prevent the lateinit handoff code from disabling the clock if
		 * a client driver still has it enabled.
		 *
		 * NOTE(review): count is deliberately NOT incremented on this
		 * path — the reference held by the handoff code appears to be
		 * transferred to this client rather than a new one being
		 * taken. Confirm against the ops->handoff() implementations.
		 */
		clk->flags &= ~CLKFLAG_HANDOFF_RATE;
		goto out;
	}
	clk->count++;
out:
	spin_unlock_irqrestore(&clk->lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);
| 69 | |
/**
 * clk_disable() - Drop one reference on a clock, gating it on the last.
 *
 * Must balance a prior clk_enable(); an unbalanced call triggers a WARN
 * and is otherwise ignored. NULL clk is a no-op.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct clk *parent;

	if (!clk)
		return;

	spin_lock_irqsave(&clk->lock, flags);
	if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name))
		goto out;
	if (clk->count == 1) {
		/*
		 * Last reference: gate this clock first, then release the
		 * parent reference taken in clk_enable(). The recursive
		 * clk_disable() uses the parent's own lock.
		 */
		if (clk->ops->disable)
			clk->ops->disable(clk);
		parent = clk_get_parent(clk);
		clk_disable(parent);
	}
	clk->count--;
out:
	spin_unlock_irqrestore(&clk->lock, flags);
}
EXPORT_SYMBOL(clk_disable);
| 92 | |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 93 | int clk_reset(struct clk *clk, enum clk_reset_action action) |
| 94 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 95 | if (!clk->ops->reset) |
| 96 | return -ENOSYS; |
| 97 | |
| 98 | return clk->ops->reset(clk, action); |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 99 | } |
| 100 | EXPORT_SYMBOL(clk_reset); |
| 101 | |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 102 | unsigned long clk_get_rate(struct clk *clk) |
| 103 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 104 | if (!clk->ops->get_rate) |
| 105 | return 0; |
| 106 | |
| 107 | return clk->ops->get_rate(clk); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 108 | } |
| 109 | EXPORT_SYMBOL(clk_get_rate); |
| 110 | |
| 111 | int clk_set_rate(struct clk *clk, unsigned long rate) |
| 112 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 113 | if (!clk->ops->set_rate) |
| 114 | return -ENOSYS; |
Daniel Walker | 3a790bb | 2010-12-13 14:35:10 -0800 | [diff] [blame] | 115 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 116 | return clk->ops->set_rate(clk, rate); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 117 | } |
| 118 | EXPORT_SYMBOL(clk_set_rate); |
| 119 | |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 120 | long clk_round_rate(struct clk *clk, unsigned long rate) |
| 121 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 122 | if (!clk->ops->round_rate) |
| 123 | return -ENOSYS; |
| 124 | |
| 125 | return clk->ops->round_rate(clk, rate); |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 126 | } |
| 127 | EXPORT_SYMBOL(clk_round_rate); |
| 128 | |
| 129 | int clk_set_min_rate(struct clk *clk, unsigned long rate) |
| 130 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 131 | if (!clk->ops->set_min_rate) |
| 132 | return -ENOSYS; |
| 133 | |
| 134 | return clk->ops->set_min_rate(clk, rate); |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 135 | } |
| 136 | EXPORT_SYMBOL(clk_set_min_rate); |
| 137 | |
| 138 | int clk_set_max_rate(struct clk *clk, unsigned long rate) |
| 139 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 140 | if (!clk->ops->set_max_rate) |
| 141 | return -ENOSYS; |
| 142 | |
| 143 | return clk->ops->set_max_rate(clk, rate); |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 144 | } |
| 145 | EXPORT_SYMBOL(clk_set_max_rate); |
| 146 | |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 147 | int clk_set_parent(struct clk *clk, struct clk *parent) |
| 148 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 149 | if (!clk->ops->set_parent) |
| 150 | return 0; |
| 151 | |
| 152 | return clk->ops->set_parent(clk, parent); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 153 | } |
| 154 | EXPORT_SYMBOL(clk_set_parent); |
| 155 | |
| 156 | struct clk *clk_get_parent(struct clk *clk) |
| 157 | { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 158 | if (!clk->ops->get_parent) |
| 159 | return NULL; |
| 160 | |
| 161 | return clk->ops->get_parent(clk); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 162 | } |
| 163 | EXPORT_SYMBOL(clk_get_parent); |
| 164 | |
| 165 | int clk_set_flags(struct clk *clk, unsigned long flags) |
| 166 | { |
| 167 | if (clk == NULL || IS_ERR(clk)) |
| 168 | return -EINVAL; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 169 | if (!clk->ops->set_flags) |
| 170 | return -ENOSYS; |
| 171 | |
| 172 | return clk->ops->set_flags(clk, flags); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 173 | } |
| 174 | EXPORT_SYMBOL(clk_set_flags); |
| 175 | |
Stephen Boyd | bb600ae | 2011-08-02 20:11:40 -0700 | [diff] [blame] | 176 | static struct clock_init_data __initdata *clk_init_data; |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 177 | |
Stephen Boyd | bb600ae | 2011-08-02 20:11:40 -0700 | [diff] [blame] | 178 | void __init msm_clock_init(struct clock_init_data *data) |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 179 | { |
| 180 | unsigned n; |
Stephen Boyd | bb600ae | 2011-08-02 20:11:40 -0700 | [diff] [blame] | 181 | struct clk_lookup *clock_tbl = data->table; |
| 182 | size_t num_clocks = data->size; |
| 183 | |
| 184 | clk_init_data = data; |
| 185 | if (clk_init_data->init) |
| 186 | clk_init_data->init(); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 187 | |
Stephen Boyd | bd32344 | 2011-02-23 09:37:42 -0800 | [diff] [blame] | 188 | for (n = 0; n < num_clocks; n++) { |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 189 | struct clk *clk = clock_tbl[n].clk; |
| 190 | struct clk *parent = clk_get_parent(clk); |
| 191 | clk_set_parent(clk, parent); |
Matt Wagantall | 14dc2af | 2011-08-12 13:16:06 -0700 | [diff] [blame^] | 192 | if (clk->ops->handoff) |
| 193 | clk->ops->handoff(clk); |
Stephen Boyd | bd32344 | 2011-02-23 09:37:42 -0800 | [diff] [blame] | 194 | } |
Daniel Walker | 5e96da5 | 2010-05-12 13:43:28 -0700 | [diff] [blame] | 195 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 196 | clkdev_add_table(clock_tbl, num_clocks); |
Brian Swetland | 600f7cf | 2008-09-09 11:04:14 -0700 | [diff] [blame] | 197 | } |
| 198 | |
/*
 * The bootloader and/or AMSS may have left various clocks enabled.
 * Disable any clocks that have not been explicitly enabled by a
 * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag,
 * and drop the handoff reference on any clock no client has claimed.
 * Returns the board's late_init() result (0 when none is provided).
 */
static int __init clock_late_init(void)
{
	unsigned n, count = 0;
	unsigned long flags;
	int ret = 0;

	clock_debug_init(clk_init_data);
	for (n = 0; n < clk_init_data->size; n++) {
		struct clk *clk = clk_init_data->table[n].clk;
		bool handoff = false;

		clock_debug_add(clk);
		if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
			spin_lock_irqsave(&clk->lock, flags);
			/* Gate clocks no client has ever referenced. */
			if (!clk->count && clk->ops->auto_off) {
				count++;
				clk->ops->auto_off(clk);
			}
			/*
			 * A still-set handoff flag means no client claimed
			 * this clock via clk_enable(); clear it and remember
			 * to release the handoff reference below.
			 */
			if (clk->flags & CLKFLAG_HANDOFF_RATE) {
				clk->flags &= ~CLKFLAG_HANDOFF_RATE;
				handoff = true;
			}
			spin_unlock_irqrestore(&clk->lock, flags);
			/*
			 * Calling clk_disable() outside the lock is safe since
			 * it doesn't need to be atomic with the flag change.
			 */
			if (handoff)
				clk_disable(clk);
		}
	}
	pr_info("clock_late_init() disabled %d unused clocks\n", count);
	if (clk_init_data->late_init)
		ret = clk_init_data->late_init();
	return ret;
}
late_initcall(clock_late_init);