| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 1 | /* | 
 | 2 |  * arch/sh/kernel/cpu/clock.c - SuperH clock framework | 
 | 3 |  * | 
| Paul Mundt | b1f6cfe | 2009-05-12 04:27:43 +0900 | [diff] [blame] | 4 |  *  Copyright (C) 2005 - 2009  Paul Mundt | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 5 |  * | 
 | 6 |  * This clock framework is derived from the OMAP version by: | 
 | 7 |  * | 
| Paul Mundt | b1f6cfe | 2009-05-12 04:27:43 +0900 | [diff] [blame] | 8 |  *	Copyright (C) 2004 - 2008 Nokia Corporation | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 9 |  *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> | 
 | 10 |  * | 
| Paul Mundt | 1d11856 | 2006-12-01 13:15:14 +0900 | [diff] [blame] | 11 |  *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> | 
 | 12 |  * | 
| Paul Mundt | 0dae895 | 2009-05-12 06:18:09 +0900 | [diff] [blame] | 13 |  *  With clkdev bits: | 
 | 14 |  * | 
 | 15 |  *	Copyright (C) 2008 Russell King. | 
 | 16 |  * | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 17 |  * This file is subject to the terms and conditions of the GNU General Public | 
 | 18 |  * License.  See the file "COPYING" in the main directory of this archive | 
 | 19 |  * for more details. | 
 | 20 |  */ | 
 | 21 | #include <linux/kernel.h> | 
 | 22 | #include <linux/init.h> | 
 | 23 | #include <linux/module.h> | 
| Paul Mundt | 237b98f | 2006-09-27 17:28:20 +0900 | [diff] [blame] | 24 | #include <linux/mutex.h> | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 25 | #include <linux/list.h> | 
| Francesco VIRLINZI | 4a55026 | 2009-03-11 07:42:05 +0000 | [diff] [blame] | 26 | #include <linux/kobject.h> | 
 | 27 | #include <linux/sysdev.h> | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 28 | #include <linux/seq_file.h> | 
 | 29 | #include <linux/err.h> | 
| Paul Mundt | 1d11856 | 2006-12-01 13:15:14 +0900 | [diff] [blame] | 30 | #include <linux/platform_device.h> | 
| Paul Mundt | cedcf33 | 2009-05-13 21:51:28 +0900 | [diff] [blame] | 31 | #include <linux/debugfs.h> | 
| Magnus Damm | c94a857 | 2009-05-25 08:10:28 +0000 | [diff] [blame] | 32 | #include <linux/cpufreq.h> | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 33 | #include <asm/clock.h> | 
| Paul Mundt | 253b088 | 2009-05-13 17:38:11 +0900 | [diff] [blame] | 34 | #include <asm/machvec.h> | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 35 |  | 
/* Global list of all registered clocks, guarded by clock_list_sem. */
static LIST_HEAD(clock_list);
/* Protects rate/usecount updates; taken IRQ-safe so any context may enable/disable. */
static DEFINE_SPINLOCK(clock_lock);
/* Serializes clock_list modification and lookup (sleepable contexts only). */
static DEFINE_MUTEX(clock_list_sem);
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 39 |  | 
/*
 * clk_rate_table_build - fill a cpufreq frequency table from
 * divisor/multiplier tables.
 * @clk:        clock whose parent rate is scaled; parent must be valid
 * @freq_table: table to fill; must have room for nr_freqs + 1 entries
 *              (the extra slot holds the CPUFREQ_TABLE_END terminator)
 * @nr_freqs:   number of candidate entries to generate
 * @src_table:  divisor/multiplier arrays indexed in step with freq_table
 * @bitmap:     optional bitmap of valid indices; a clear bit marks the
 *              corresponding entry CPUFREQ_ENTRY_INVALID
 */
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	for (i = 0; i < nr_freqs; i++) {
		/* Default to a 1:1 ratio when the tables have no entry. */
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		/* A zero ratio or a cleared bitmap bit invalidates the entry. */
		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
 | 73 |  | 
 | 74 | long clk_rate_table_round(struct clk *clk, | 
 | 75 | 			  struct cpufreq_frequency_table *freq_table, | 
 | 76 | 			  unsigned long rate) | 
 | 77 | { | 
 | 78 | 	unsigned long rate_error, rate_error_prev = ~0UL; | 
 | 79 | 	unsigned long rate_best_fit = rate; | 
 | 80 | 	unsigned long highest, lowest; | 
 | 81 | 	int i; | 
 | 82 |  | 
 | 83 | 	highest = lowest = 0; | 
 | 84 |  | 
 | 85 | 	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { | 
 | 86 | 		unsigned long freq = freq_table[i].frequency; | 
 | 87 |  | 
 | 88 | 		if (freq == CPUFREQ_ENTRY_INVALID) | 
 | 89 | 			continue; | 
 | 90 |  | 
 | 91 | 		if (freq > highest) | 
 | 92 | 			highest = freq; | 
 | 93 | 		if (freq < lowest) | 
 | 94 | 			lowest = freq; | 
 | 95 |  | 
 | 96 | 		rate_error = abs(freq - rate); | 
 | 97 | 		if (rate_error < rate_error_prev) { | 
 | 98 | 			rate_best_fit = freq; | 
 | 99 | 			rate_error_prev = rate_error; | 
 | 100 | 		} | 
 | 101 |  | 
 | 102 | 		if (rate_error == 0) | 
 | 103 | 			break; | 
 | 104 | 	} | 
 | 105 |  | 
 | 106 | 	if (rate >= highest) | 
 | 107 | 		rate_best_fit = highest; | 
 | 108 | 	if (rate <= lowest) | 
 | 109 | 		rate_best_fit = lowest; | 
 | 110 |  | 
 | 111 | 	return rate_best_fit; | 
 | 112 | } | 
 | 113 |  | 
| Magnus Damm | 098dee9 | 2009-06-04 05:31:41 +0000 | [diff] [blame] | 114 | int clk_rate_table_find(struct clk *clk, | 
 | 115 | 			struct cpufreq_frequency_table *freq_table, | 
 | 116 | 			unsigned long rate) | 
 | 117 | { | 
 | 118 | 	int i; | 
 | 119 |  | 
 | 120 | 	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { | 
 | 121 | 		unsigned long freq = freq_table[i].frequency; | 
 | 122 |  | 
 | 123 | 		if (freq == CPUFREQ_ENTRY_INVALID) | 
 | 124 | 			continue; | 
 | 125 |  | 
 | 126 | 		if (freq == rate) | 
 | 127 | 			return i; | 
 | 128 | 	} | 
 | 129 |  | 
 | 130 | 	return -ENOENT; | 
 | 131 | } | 
 | 132 |  | 
| Paul Mundt | a02cb23 | 2009-05-12 03:50:44 +0900 | [diff] [blame] | 133 | /* Used for clocks that always have same value as the parent clock */ | 
 | 134 | unsigned long followparent_recalc(struct clk *clk) | 
 | 135 | { | 
| Paul Mundt | 549b5e3 | 2009-05-14 17:38:46 +0900 | [diff] [blame] | 136 | 	return clk->parent ? clk->parent->rate : 0; | 
| Paul Mundt | a02cb23 | 2009-05-12 03:50:44 +0900 | [diff] [blame] | 137 | } | 
 | 138 |  | 
/*
 * clk_reparent - move @child under @parent in the clock tree.
 *
 * Unlinks @child from its current sibling list and, when @parent is
 * non-NULL, relinks it onto the parent's children list; a NULL parent
 * leaves the child detached.  Always returns 0.
 *
 * NOTE(review): looks like callers must hold clock_lock - confirm.
 */
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}
 | 151 |  | 
/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	/*
	 * Recalculate each child through its recalc hook (when present),
	 * then recurse so the entire subtree picks up the new parent rate.
	 */
	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}
 | 164 |  | 
/*
 * __clk_disable - drop one usecount reference; caller holds clock_lock.
 *
 * Warns and bails out on an unbalanced disable (usecount already 0).
 * When the count reaches zero the hardware disable hook runs and the
 * reference held on the parent is released recursively.
 */
static void __clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
	}

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}
 | 181 |  | 
/*
 * clk_disable - drop one enable reference on a clock.
 * A NULL @clk is silently ignored.  Takes clock_lock IRQ-safe, so this
 * may be called from any context.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 194 |  | 
/*
 * __clk_enable - take one usecount reference; caller holds clock_lock.
 *
 * On the 0 -> 1 transition the parent is enabled first, then the
 * clock's own enable hook.  If either step fails, partial work is
 * unwound and the usecount restored.  Returns 0 on success or the
 * negative errno from the failing hook.
 */
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				/* Release the parent ref taken above. */
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}
 | 221 |  | 
/*
 * clk_enable - enable a clock, taking one reference.
 * Returns -EINVAL for a NULL clock, otherwise the result of the
 * (recursive) hardware enable.  IRQ-safe.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
 | 237 |  | 
/* Clocks with no parent: the roots of the clock tree. */
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}
 | 257 |  | 
/*
 * clk_register - add a clock to the framework.
 *
 * Attaches the clock either to its parent's children list or to the
 * root list, adds it to the global clock_list, and invokes its init
 * hook when one is provided.  Registering a clock twice is a no-op
 * returning 0.  Returns -EINVAL for a NULL or error pointer.
 */
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 287 |  | 
/* Remove a clock from the clock tree and the global list. */
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 296 |  | 
| Paul Mundt | 4ff29ff | 2009-05-12 05:14:53 +0900 | [diff] [blame] | 297 | static void clk_enable_init_clocks(void) | 
 | 298 | { | 
 | 299 | 	struct clk *clkp; | 
 | 300 |  | 
 | 301 | 	list_for_each_entry(clkp, &clock_list, node) | 
 | 302 | 		if (clkp->flags & CLK_ENABLE_ON_INIT) | 
 | 303 | 			clk_enable(clkp); | 
 | 304 | } | 
 | 305 |  | 
| Paul Mundt | db62e5b | 2007-04-26 12:17:20 +0900 | [diff] [blame] | 306 | unsigned long clk_get_rate(struct clk *clk) | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 307 | { | 
 | 308 | 	return clk->rate; | 
 | 309 | } | 
| Paul Mundt | db62e5b | 2007-04-26 12:17:20 +0900 | [diff] [blame] | 310 | EXPORT_SYMBOL_GPL(clk_get_rate); | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 311 |  | 
/*
 * clk_set_rate - set a clock's rate using the default algorithm.
 * Thin wrapper around clk_set_rate_ex() with algo_id 0.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
| dmitry pervushin | 1929cb3 | 2007-04-24 13:39:09 +0900 | [diff] [blame] | 317 |  | 
/*
 * clk_set_rate_ex - set a clock rate, selecting the algorithm by id.
 *
 * When a set_rate hook exists it performs the change (and may fail, in
 * which case nothing is propagated); without one the cached rate is
 * simply overwritten.  On success the rate is re-derived via recalc
 * (when present) and pushed down to all child clocks.  Runs entirely
 * under clock_lock with interrupts disabled.
 */
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		/* No hook: trust the caller and cache the rate directly. */
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 345 |  | 
| Francesco VIRLINZI | d680c76 | 2009-03-11 07:40:54 +0000 | [diff] [blame] | 346 | int clk_set_parent(struct clk *clk, struct clk *parent) | 
 | 347 | { | 
| Paul Mundt | b1f6cfe | 2009-05-12 04:27:43 +0900 | [diff] [blame] | 348 | 	unsigned long flags; | 
| Francesco VIRLINZI | d680c76 | 2009-03-11 07:40:54 +0000 | [diff] [blame] | 349 | 	int ret = -EINVAL; | 
| Francesco VIRLINZI | d680c76 | 2009-03-11 07:40:54 +0000 | [diff] [blame] | 350 |  | 
 | 351 | 	if (!parent || !clk) | 
 | 352 | 		return ret; | 
| Paul Mundt | aa87aa3 | 2009-05-12 05:51:05 +0900 | [diff] [blame] | 353 | 	if (clk->parent == parent) | 
 | 354 | 		return 0; | 
| Francesco VIRLINZI | d680c76 | 2009-03-11 07:40:54 +0000 | [diff] [blame] | 355 |  | 
| Paul Mundt | b1f6cfe | 2009-05-12 04:27:43 +0900 | [diff] [blame] | 356 | 	spin_lock_irqsave(&clock_lock, flags); | 
 | 357 | 	if (clk->usecount == 0) { | 
 | 358 | 		if (clk->ops->set_parent) | 
 | 359 | 			ret = clk->ops->set_parent(clk, parent); | 
| Paul Mundt | aa87aa3 | 2009-05-12 05:51:05 +0900 | [diff] [blame] | 360 | 		else | 
 | 361 | 			ret = clk_reparent(clk, parent); | 
 | 362 |  | 
| Paul Mundt | b1f6cfe | 2009-05-12 04:27:43 +0900 | [diff] [blame] | 363 | 		if (ret == 0) { | 
| Paul Mundt | aa87aa3 | 2009-05-12 05:51:05 +0900 | [diff] [blame] | 364 | 			pr_debug("clock: set parent of %s to %s (new rate %ld)\n", | 
 | 365 | 				 clk->name, clk->parent->name, clk->rate); | 
| Paul Mundt | b1f6cfe | 2009-05-12 04:27:43 +0900 | [diff] [blame] | 366 | 			if (clk->ops->recalc) | 
 | 367 | 				clk->rate = clk->ops->recalc(clk); | 
 | 368 | 			propagate_rate(clk); | 
 | 369 | 		} | 
 | 370 | 	} else | 
 | 371 | 		ret = -EBUSY; | 
 | 372 | 	spin_unlock_irqrestore(&clock_lock, flags); | 
| Francesco VIRLINZI | d680c76 | 2009-03-11 07:40:54 +0000 | [diff] [blame] | 373 |  | 
| Francesco VIRLINZI | d680c76 | 2009-03-11 07:40:54 +0000 | [diff] [blame] | 374 | 	return ret; | 
 | 375 | } | 
 | 376 | EXPORT_SYMBOL_GPL(clk_set_parent); | 
 | 377 |  | 
/* Return the cached parent pointer; NULL for a root clock. */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
 | 383 |  | 
| Paul Mundt | f6991b0 | 2007-07-20 13:29:09 +0900 | [diff] [blame] | 384 | long clk_round_rate(struct clk *clk, unsigned long rate) | 
 | 385 | { | 
 | 386 | 	if (likely(clk->ops && clk->ops->round_rate)) { | 
 | 387 | 		unsigned long flags, rounded; | 
 | 388 |  | 
 | 389 | 		spin_lock_irqsave(&clock_lock, flags); | 
 | 390 | 		rounded = clk->ops->round_rate(clk, rate); | 
 | 391 | 		spin_unlock_irqrestore(&clock_lock, flags); | 
 | 392 |  | 
 | 393 | 		return rounded; | 
 | 394 | 	} | 
 | 395 |  | 
 | 396 | 	return clk_get_rate(clk); | 
 | 397 | } | 
 | 398 | EXPORT_SYMBOL_GPL(clk_round_rate); | 
 | 399 |  | 
/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 *
 * Caller must hold clock_list_sem.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		/* Keep the most specific (highest-scoring) match so far. */
		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
 | 437 |  | 
 | 438 | struct clk *clk_get_sys(const char *dev_id, const char *con_id) | 
 | 439 | { | 
 | 440 | 	struct clk *clk; | 
 | 441 |  | 
 | 442 | 	mutex_lock(&clock_list_sem); | 
 | 443 | 	clk = clk_find(dev_id, con_id); | 
 | 444 | 	mutex_unlock(&clock_list_sem); | 
 | 445 |  | 
 | 446 | 	return clk ? clk : ERR_PTR(-ENOENT); | 
 | 447 | } | 
 | 448 | EXPORT_SYMBOL_GPL(clk_get_sys); | 
 | 449 |  | 
/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	/* Preferred path: clkdev-style dev/con lookup. */
	clk = clk_get_sys(dev_id, id);
	if (clk && !IS_ERR(clk))
		return clk;

	/* Legacy path: match on platform device id plus clock name. */
	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	/* Fall back to matching on clock name alone. */
	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 491 |  | 
 | 492 | void clk_put(struct clk *clk) | 
 | 493 | { | 
 | 494 | 	if (clk && !IS_ERR(clk)) | 
 | 495 | 		module_put(clk->owner); | 
 | 496 | } | 
| Paul Mundt | db62e5b | 2007-04-26 12:17:20 +0900 | [diff] [blame] | 497 | EXPORT_SYMBOL_GPL(clk_put); | 
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 498 |  | 
| Francesco VIRLINZI | 4a55026 | 2009-03-11 07:42:05 +0000 | [diff] [blame] | 499 | #ifdef CONFIG_PM | 
/*
 * Restore clock hardware state on the resume-from-hibernation path.
 *
 * The work happens on the PM_EVENT_ON transition that follows a
 * PM_EVENT_FREEZE: each clock's parent and rate are reprogrammed from
 * the cached software state.  prev_state tracks the previous PM event
 * so a plain suspend/resume cycle is left alone.
 */
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}
 | 535 |  | 
/* Resume is modelled as a suspend transition to PMSG_ON. */
static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
 | 540 |  | 
/*
 * Sysdev glue: a "clks" class whose suspend/resume hooks restore
 * clock hardware state across hibernation.
 */
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
 | 553 |  | 
/* Register the clks sysdev objects so the PM callbacks above fire. */
static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
 | 563 | #endif | 
 | 564 |  | 
/*
 * clk_init - boot-time clock framework initialization.
 *
 * Registers the CPU clocks via arch_clk_init(), then any machine
 * vector specific clocks, recalculates all root clock rates and
 * finally enables the clocks flagged CLK_ENABLE_ON_INIT.  Returns 0
 * on success or the first failing registration's error code.
 */
int __init clk_init(void)
{
	int ret;

	ret = arch_clk_init();
	if (unlikely(ret)) {
		pr_err("%s: CPU clock registration failed.\n", __func__);
		return ret;
	}

	if (sh_mv.mv_clk_init) {
		ret = sh_mv.mv_clk_init();
		if (unlikely(ret)) {
			pr_err("%s: machvec clock initialization failed.\n",
			       __func__);
			return ret;
		}
	}

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}
 | 592 |  | 
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
/* Top-level "clock" debugfs directory; set by clk_debugfs_init(). */
static struct dentry *clk_debugfs_root;
| Paul Mundt | 36ddf31 | 2006-01-16 22:14:17 -0800 | [diff] [blame] | 597 |  | 
/*
 * Create the debugfs directory and attribute files for a single clock.
 * The directory is named "<name>" or "<name>:<id>" and placed under
 * the parent clock's directory (or the root when there is no parent).
 * On failure all files created so far are removed.
 *
 * NOTE(review): s[] is filled with unbounded sprintf - assumes clock
 * names stay well under 255 bytes; confirm against clock registrations.
 */
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
	if (c->id >= 0)
		sprintf(p, ":%d", c->id);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	/* Tear down any attribute files already created, then the dir. */
	d = c->dentry;
	list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}
 | 638 |  | 
/*
 * Register a clock (and, recursively, any unregistered ancestors) in
 * debugfs.  The parent must be registered first so the child directory
 * can be created underneath it.  Idempotent: a clock that already has
 * a dentry is skipped.
 */
static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}
| Paul Mundt | cedcf33 | 2009-05-13 21:51:28 +0900 | [diff] [blame] | 657 |  | 
/* Walk the global clock list and mirror it into debugfs at boot. */
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
	return err;
}
late_initcall(clk_debugfs_init);