/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>
#include <mach/clk-provider.h>
#include <mach/clk.h>
#include <mach/scm-io.h>

#include "clock-local.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

/*
 * Common Set-Rate Functions
 */

/* For clocks with MND dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, ctl_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl_relaxed(rcg->ns_reg);
	ns_reg_val |= BIT(7);
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, rcg->md_reg);

	/* If the clock has a separate CC register, program it. */
	if (rcg->ns_reg != rcg->b.ctl_reg) {
		ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
		ctl_reg_val &= ~(rcg->ctl_mask);
		ctl_reg_val |= nf->ctl_val;
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~BIT(7);
	writel_relaxed(ns_reg_val, rcg->ns_reg);
}

void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	/*
	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
	 * in NS registers are applied in the enable path, since power can be
	 * saved by leaving an un-clocked or slowly-clocked source selected
	 * until the clock is enabled.
	 */
}

void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	uint32_t ctl_reg_val;

	/* Assert MND reset. */
	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
	ctl_reg_val |= BIT(8);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, rcg->md_reg);

	/* Program MN counter Enable and Mode. */
	ctl_reg_val &= ~(rcg->ctl_mask);
	ctl_reg_val |= nf->ctl_val;
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Deassert MND reset. */
	ctl_reg_val &= ~BIT(8);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}

void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = rcg->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, ctl_reg_val;
	uint32_t bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	ns_reg_val = readl_relaxed(rcg->ns_reg);

	/* Assert bank MND reset. */
	ns_reg_val |= new_bank_masks->rst_mask;
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (rcg->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	writel_relaxed(nf->md_val, new_bank_masks->md_reg);

	/* Enable counter only if clock is enabled. */
	if (rcg->enabled)
		ctl_reg_val |= new_bank_masks->mnd_en_mask;
	else
		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);

	ctl_reg_val &= ~(new_bank_masks->mode_mask);
	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Deassert bank MND reset. */
	ns_reg_val &= ~(new_bank_masks->rst_mask);
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/*
	 * Switch to the new bank if clock is running.  If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (rcg->enabled && rcg->current_freq->freq_hz) {
		ctl_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Disable old bank's MN counter. */
		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/* Update the MND_EN and NS masks to match the current bank. */
	rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
	rcg->ns_mask = new_bank_masks->ns_mask;
}

void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = rcg->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ns_reg_val = readl_relaxed(rcg->ns_reg);
	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (rcg->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/*
	 * Switch to the new bank if clock is running.  If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (rcg->enabled && rcg->current_freq->freq_hz) {
		ns_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ns_reg_val, rcg->ns_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/* Update the NS mask to match the current bank. */
	rcg->ns_mask = new_bank_masks->ns_mask;
}

/*
 * Clock enable/disable functions
 */

/* Return non-zero if a clock status register shows the clock is halted. */
static int branch_clk_is_halted(const struct branch *b)
{
	int invert = (b->halt_check == ENABLE);
	int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
	return invert ? !status_bit : status_bit;
}

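/* Return non-zero if the branch is currently in hardware clock gating mode. */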
static int branch_in_hwcg_mode(const struct branch *b)
{
	if (!b->hwcg_mask)
		return 0;

	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}

void __branch_enable_reg(const struct branch *b, const char *name)
{
	u32 reg_val;

	if (b->en_mask) {
		reg_val = readl_relaxed(b->ctl_reg);
		reg_val |= b->en_mask;
		writel_relaxed(reg_val, b->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers.  It's also needed in the udelay() case to ensure
	 * the delay starts after the branch enable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(b))
		return;

	/* Wait for clock to enable before returning. */
	if (b->halt_check == DELAY) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (b->halt_check == ENABLE || b->halt_check == HALT
			|| b->halt_check == ENABLE_VOTED
			|| b->halt_check == HALT_VOTED) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'off'", name);
	}
}

/* Perform any register operations required to enable the clock. */
static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
{
	u32 reg_val;
	void __iomem *const reg = rcg->b.ctl_reg;

	/*
	 * Program the NS register, if applicable. NS registers are not
	 * set in the set_rate path because power can be saved by deferring
	 * the selection of a clocked source until the clock is enabled.
	 */
	if (rcg->ns_mask) {
		reg_val = readl_relaxed(rcg->ns_reg);
		reg_val &= ~(rcg->ns_mask);
		reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
		writel_relaxed(reg_val, rcg->ns_reg);
	}

	/* Enable MN counter, if applicable. */
	reg_val = readl_relaxed(reg);
	if (rcg->current_freq->md_val) {
		reg_val |= rcg->mnd_en_mask;
		writel_relaxed(reg_val, reg);
	}
	/* Enable root. */
	if (rcg->root_en_mask) {
		reg_val |= rcg->root_en_mask;
		writel_relaxed(reg_val, reg);
	}
	__branch_enable_reg(&rcg->b, rcg->c.dbg_name);
}

/* Perform any register operations required to disable the branch. */
u32 __branch_disable_reg(const struct branch *b, const char *name)
{
	u32 reg_val;

	reg_val = b->ctl_reg ? readl_relaxed(b->ctl_reg) : 0;
	if (b->en_mask) {
		reg_val &= ~(b->en_mask);
		writel_relaxed(reg_val, b->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers.  It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(b))
		return reg_val;

	/* Wait for clock to disable before continuing. */
	if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
			|| b->halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (b->halt_check == ENABLE || b->halt_check == HALT) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'on'", name);
	}

	return reg_val;
}

/* Perform any register operations required to disable the generator. */
static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
{
	void __iomem *const reg = rcg->b.ctl_reg;
	uint32_t reg_val;

	reg_val = __branch_disable_reg(&rcg->b, rcg->c.dbg_name);
	/* Disable root. */
	if (rcg->root_en_mask) {
		reg_val &= ~(rcg->root_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (rcg->current_freq->md_val) {
		reg_val &= ~(rcg->mnd_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/*
	 * Program NS register to low-power value with an un-clocked or
	 * slowly-clocked source selected.
	 */
	if (rcg->ns_mask) {
		reg_val = readl_relaxed(rcg->ns_reg);
		reg_val &= ~(rcg->ns_mask);
		reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
		writel_relaxed(reg_val, rcg->ns_reg);
	}
}

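/* Mark an RCG as prepared; warn if no rate has been set on it yet. */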
static int rcg_clk_prepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to prepare %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);
	rcg->prepared = true;

	return 0;
}

/* Enable a rate-settable clock. */
static int rcg_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *rcg = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_enable_reg(rcg);
	rcg->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

/* Disable a rate-settable clock. */
static void rcg_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *rcg = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_disable_reg(rcg);
	rcg->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void rcg_clk_unprepare(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	rcg->prepared = false;
}

/*
 * Frequency-related functions
 */

/* Set a clock to an exact rate. */
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *nf, *cf;
	struct clk *chld;
	int rc = 0;
	unsigned long flags;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	/* Enable source clock dependency for the new frequency */
	if (rcg->prepared) {
		rc = clk_prepare(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock_irqsave(&c->lock, flags);
	if (rcg->enabled) {
		rc = clk_enable(nf->src_clk);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, flags);
			clk_unprepare(nf->src_clk);
			return rc;
		}
	}

	spin_lock(&local_clock_reg_lock);

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (!rcg->bank_info) {
		/* Disable all branches to prevent glitches. */
		list_for_each_entry(chld, &rcg->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			/*
			 * We don't need to grab the child's lock because
			 * we hold the local_clock_reg_lock and 'enabled' is
			 * only modified within lock.
			 */
			if (x->enabled)
				__branch_disable_reg(&x->b, x->c.dbg_name);
		}
		if (rcg->enabled)
			__rcg_clk_disable_reg(rcg);
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!rcg->set_rate);
	rcg->set_rate(rcg, nf);

	/*
	 * Current freq must be updated before __rcg_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	rcg->current_freq = nf;

	/* Enable any clocks that were disabled. */
	if (!rcg->bank_info) {
		if (rcg->enabled)
			__rcg_clk_enable_reg(rcg);
		/* Enable only branches that were ON before. */
		list_for_each_entry(chld, &rcg->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			if (x->enabled)
				__branch_enable_reg(&x->b, x->c.dbg_name);
		}
	}

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (rcg->enabled)
		clk_disable(cf->src_clk);
	spin_unlock_irqrestore(&c->lock, flags);

	if (rcg->prepared)
		clk_unprepare(cf->src_clk);

	return rc;
}

/* Check if a clock is currently enabled. */
static int rcg_clk_is_enabled(struct clk *c)
{
	return to_rcg_clk(c)->enabled;
}

/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return to_rcg_clk(c)->current_freq->src_clk;
}

/* Disable hw clock gating if not set at boot */
enum handoff branch_handoff(struct branch *b, struct clk *c)
{
	if (!branch_in_hwcg_mode(b)) {
		b->hwcg_mask = 0;
		if (b->ctl_reg && readl_relaxed(b->ctl_reg) & b->en_mask)
			return HANDOFF_ENABLED_CLK;
	}
	return HANDOFF_DISABLED_CLK;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *br = to_branch_clk(c);
	return branch_handoff(&br->b, &br->c);
}

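/*
 * Determine an already-running RCG's rate from hardware: read back the
 * NS/MD registers, match them against the frequency table, and record the
 * matching entry as the current frequency.
 */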
static enum handoff rcg_clk_handoff(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	uint32_t ctl_val, ns_val, md_val, ns_mask;
	struct clk_freq_tbl *freq;
	enum handoff ret;

	ctl_val = readl_relaxed(rcg->b.ctl_reg);
	ret = branch_handoff(&rcg->b, &rcg->c);
	if (ret == HANDOFF_DISABLED_CLK)
		return HANDOFF_DISABLED_CLK;

	if (rcg->bank_info) {
		const struct bank_masks *bank_masks = rcg->bank_info;
		const struct bank_mask_info *bank_info;
		if (!(ctl_val & bank_masks->bank_sel_mask))
			bank_info = &bank_masks->bank0_mask;
		else
			bank_info = &bank_masks->bank1_mask;

		ns_mask = bank_info->ns_mask;
		md_val = bank_info->md_reg ?
				readl_relaxed(bank_info->md_reg) : 0;
	} else {
		ns_mask = rcg->ns_mask;
		md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
	}
	if (!ns_mask)
		return HANDOFF_UNKNOWN_RATE;
	ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if ((freq->ns_val & ns_mask) == ns_val &&
		    (!freq->md_val || freq->md_val == md_val))
			break;
	}
	if (freq->freq_hz == FREQ_END)
		return HANDOFF_UNKNOWN_RATE;

	rcg->current_freq = freq;
	c->rate = freq->freq_hz;

	return HANDOFF_ENABLED_CLK;
}

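/* No-op clk_ops for clocks that need no register programming (e.g. gnd_clk). */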
struct clk_ops clk_ops_empty;

struct fixed_clk gnd_clk = {
	.c = {
		.dbg_name = "ground_clk",
		.ops = &clk_ops_empty,
		CLK_INIT(gnd_clk.c),
	},
};

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *br = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_enable_reg(&br->b, br->c.dbg_name);
	br->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *br = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_disable_reg(&br->b, br->c.dbg_name);
	br->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static struct clk *branch_clk_get_parent(struct clk *c)
{
	return to_branch_clk(c)->parent;
}

static int branch_clk_is_enabled(struct clk *c)
{
	return to_branch_clk(c)->enabled;
}

static void branch_enable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val |= b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_disable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val &= ~b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_branch_clk(c)->b);
}

static void branch_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_branch_clk(c)->b);
}

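/* Apply CLKFLAG_RETAIN/CLKFLAG_NORETAIN by updating the branch's retention register. */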
static int branch_set_flags(struct branch *b, unsigned flags)
{
	unsigned long irq_flags;
	u32 reg_val;
	int ret = 0;

	if (!b->retain_reg)
		return -EPERM;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	reg_val = readl_relaxed(b->retain_reg);
	switch (flags) {
	case CLKFLAG_RETAIN:
		reg_val |= b->retain_mask;
		break;
	case CLKFLAG_NORETAIN:
		reg_val &= ~b->retain_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->retain_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}

static int branch_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_branch_clk(clk)->b, flags);
}

static int branch_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_branch_clk(c)->b);
}

static void rcg_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_rcg_clk(c)->b);
}

static void rcg_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_rcg_clk(c)->b);
}

static int rcg_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
}

static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_rcg_clk(clk)->b, flags);
}

| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 787 | int branch_reset(struct branch *b, enum clk_reset_action action) | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 788 | { | 
|  | 789 | int ret = 0; | 
|  | 790 | u32 reg_val; | 
|  | 791 | unsigned long flags; | 
|  | 792 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 793 | if (!b->reset_reg) | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 794 | return -EPERM; | 
|  | 795 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 796 | /* Disable hw gating when asserting a reset */ | 
|  | 797 | if (b->hwcg_mask && action == CLK_RESET_ASSERT) | 
|  | 798 | branch_disable_hwcg(b); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 799 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 800 | spin_lock_irqsave(&local_clock_reg_lock, flags); | 
|  | 801 | /* Assert/Deassert reset */ | 
|  | 802 | reg_val = readl_relaxed(b->reset_reg); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 803 | switch (action) { | 
|  | 804 | case CLK_RESET_ASSERT: | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 805 | reg_val |= b->reset_mask; | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 806 | break; | 
|  | 807 | case CLK_RESET_DEASSERT: | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 808 | reg_val &= ~b->reset_mask; | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 809 | break; | 
|  | 810 | default: | 
|  | 811 | ret = -EINVAL; | 
|  | 812 | } | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 813 | writel_relaxed(reg_val, b->reset_reg); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 814 | spin_unlock_irqrestore(&local_clock_reg_lock, flags); | 
|  | 815 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 816 | /* Enable hw gating when deasserting a reset */ | 
|  | 817 | if (b->hwcg_mask && action == CLK_RESET_DEASSERT) | 
|  | 818 | branch_enable_hwcg(b); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 819 | /* Make sure write is issued before returning. */ | 
|  | 820 | mb(); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 821 | return ret; | 
|  | 822 | } | 
|  | 823 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 824 | static int branch_clk_reset(struct clk *c, enum clk_reset_action action) | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 825 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 826 | return branch_reset(&to_branch_clk(c)->b, action); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 827 | } | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 828 |  | 
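/*
 * Illustrative sketch only: how a driver might pulse a block reset through
 * this path.  clk_reset() and the CLK_RESET_* actions come from mach/clk.h
 * and dispatch to the .reset op (branch_clk_reset() above, rcg_clk_reset()
 * below), which funnels into branch_reset().  "foo_clk" and the 5 us hold
 * time are hypothetical.
 */
static int example_reset_foo_block(struct clk *foo_clk)
{
	int rc;

	rc = clk_reset(foo_clk, CLK_RESET_ASSERT);
	if (rc)
		return rc;
	udelay(5);	/* hypothetical settling time while held in reset */
	return clk_reset(foo_clk, CLK_RESET_DEASSERT);
}
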
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 829 | struct clk_ops clk_ops_branch = { | 
|  | 830 | .enable = branch_clk_enable, | 
|  | 831 | .disable = branch_clk_disable, | 
|  | 832 | .enable_hwcg = branch_clk_enable_hwcg, | 
|  | 833 | .disable_hwcg = branch_clk_disable_hwcg, | 
|  | 834 | .in_hwcg_mode = branch_clk_in_hwcg_mode, | 
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 835 | .is_enabled = branch_clk_is_enabled, | 
|  | 836 | .reset = branch_clk_reset, | 
|  | 837 | .get_parent = branch_clk_get_parent, | 
|  | 838 | .handoff = branch_clk_handoff, | 
|  | 839 | .set_flags = branch_clk_set_flags, | 
|  | 840 | }; | 
|  | 841 |  | 
|  | 842 | struct clk_ops clk_ops_reset = { | 
|  | 843 | .reset = branch_clk_reset, | 
|  | 844 | }; | 
|  | 845 |  | 
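/*
 * Illustrative sketch only: roughly how an SoC clock table entry could be
 * wired to clk_ops_branch above.  Every register macro, bit position and
 * the "foo" name is hypothetical; real definitions live in the SoC-specific
 * clock-*.c files.  A reset-only line with no enable control would use
 * clk_ops_reset instead, typically filling in just reset_reg and reset_mask.
 */
static struct branch_clk foo_p_clk = {
	.b = {
		.ctl_reg = FOO_CTL_REG,		/* hypothetical enable register */
		.en_mask = BIT(4),
		.halt_reg = FOO_HALT_REG,	/* hypothetical halt status register */
		.halt_bit = 11,
		.reset_reg = FOO_CTL_REG,	/* hypothetical */
		.reset_mask = BIT(7),
	},
	.c = {
		.dbg_name = "foo_p_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(foo_p_clk.c),
	},
};
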
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 846 | static int rcg_clk_reset(struct clk *c, enum clk_reset_action action) | 
| Stephen Boyd | 7bf2814 | 2011-12-07 00:30:52 -0800 | [diff] [blame] | 847 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 848 | return branch_reset(&to_rcg_clk(c)->b, action); | 
| Stephen Boyd | 7bf2814 | 2011-12-07 00:30:52 -0800 | [diff] [blame] | 849 | } | 
|  | 850 |  | 
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 851 | struct clk_ops clk_ops_rcg = { | 
| Stephen Boyd | 2c2875f | 2012-01-24 17:36:34 -0800 | [diff] [blame] | 852 | .prepare = rcg_clk_prepare, | 
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 853 | .enable = rcg_clk_enable, | 
|  | 854 | .disable = rcg_clk_disable, | 
| Stephen Boyd | 2c2875f | 2012-01-24 17:36:34 -0800 | [diff] [blame] | 855 | .unprepare = rcg_clk_unprepare, | 
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 856 | .enable_hwcg = rcg_clk_enable_hwcg, | 
|  | 857 | .disable_hwcg = rcg_clk_disable_hwcg, | 
|  | 858 | .in_hwcg_mode = rcg_clk_in_hwcg_mode, | 
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 859 | .handoff = rcg_clk_handoff, | 
|  | 860 | .set_rate = rcg_clk_set_rate, | 
|  | 861 | .list_rate = rcg_clk_list_rate, | 
|  | 862 | .is_enabled = rcg_clk_is_enabled, | 
|  | 863 | .round_rate = rcg_clk_round_rate, | 
|  | 864 | .reset = rcg_clk_reset, | 
|  | 865 | .get_parent = rcg_clk_get_parent, | 
|  | 866 | .set_flags = rcg_clk_set_flags, | 
|  | 867 | }; | 
|  | 868 |  | 
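/*
 * Illustrative sketch only: typical consumer-side use of an RCG clock that
 * clk_ops_rcg above drives.  The 48 MHz rate is arbitrary and error paths
 * are trimmed; clk_set_rate()/clk_prepare()/clk_enable() are the generic
 * kernel clk API and end up in the rcg_clk_* ops listed above.
 */
static int example_enable_core_clock(struct clk *c)
{
	int rc;

	rc = clk_set_rate(c, 48000000);	/* ends up in rcg_clk_set_rate() */
	if (rc)
		return rc;

	rc = clk_prepare(c);		/* dispatches to rcg_clk_prepare() */
	if (rc)
		return rc;

	rc = clk_enable(c);		/* dispatches to rcg_clk_enable() */
	if (rc)
		clk_unprepare(c);
	return rc;
}
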
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 869 | static int cdiv_clk_enable(struct clk *c) | 
|  | 870 | { | 
|  | 871 | unsigned long flags; | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 872 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 873 |  | 
|  | 874 | spin_lock_irqsave(&local_clock_reg_lock, flags); | 
| Matt Wagantall | 0de1b3f | 2012-06-05 19:52:43 -0700 | [diff] [blame] | 875 | __branch_enable_reg(&cdiv->b, cdiv->c.dbg_name); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 876 | spin_unlock_irqrestore(&local_clock_reg_lock, flags); | 
|  | 877 |  | 
|  | 878 | return 0; | 
|  | 879 | } | 
|  | 880 |  | 
|  | 881 | static void cdiv_clk_disable(struct clk *c) | 
|  | 882 | { | 
|  | 883 | unsigned long flags; | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 884 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 885 |  | 
|  | 886 | spin_lock_irqsave(&local_clock_reg_lock, flags); | 
| Matt Wagantall | 0de1b3f | 2012-06-05 19:52:43 -0700 | [diff] [blame] | 887 | __branch_disable_reg(&cdiv->b, cdiv->c.dbg_name); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 888 | spin_unlock_irqrestore(&local_clock_reg_lock, flags); | 
|  | 889 | } | 
|  | 890 |  | 
|  | 891 | static int cdiv_clk_set_rate(struct clk *c, unsigned long rate) | 
|  | 892 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 893 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 894 | u32 reg_val; | 
|  | 895 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 896 | if (rate > cdiv->max_div) | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 897 | return -EINVAL; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 898 |  | 
|  | 899 | spin_lock(&local_clock_reg_lock); | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 900 | reg_val = readl_relaxed(cdiv->ns_reg); | 
|  | 901 | reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 902 | /* Non-zero rates mean set a divider, zero means use external input */ | 
|  | 903 | if (rate) | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 904 | reg_val |= (rate - 1) << cdiv->div_offset; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 905 | else | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 906 | reg_val |= cdiv->ext_mask; | 
|  | 907 | writel_relaxed(reg_val, cdiv->ns_reg); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 908 | spin_unlock(&local_clock_reg_lock); | 
|  | 909 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 910 | cdiv->cur_div = rate; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 911 | return 0; | 
|  | 912 | } | 
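
/*
 * Worked example (hypothetical field layout): for these clocks the "rate"
 * is the divider value itself rather than a frequency in Hz.  With
 * div_offset = 16 and max_div = 16, cdiv_clk_set_rate(c, 4) clears
 * ext_mask plus bits 19:16 and then writes (4 - 1) = 3 into bits 19:16;
 * cdiv_clk_set_rate(c, 0) sets ext_mask to select the external input.
 */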
|  | 913 |  | 
|  | 914 | static unsigned long cdiv_clk_get_rate(struct clk *c) | 
|  | 915 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 916 | return to_cdiv_clk(c)->cur_div; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 917 | } | 
|  | 918 |  | 
|  | 919 | static long cdiv_clk_round_rate(struct clk *c, unsigned long rate) | 
|  | 920 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 921 | return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 922 | } | 
|  | 923 |  | 
|  | 924 | static int cdiv_clk_list_rate(struct clk *c, unsigned n) | 
|  | 925 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 926 | return n > to_cdiv_clk(c)->max_div ? -ENXIO : n; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 927 | } | 
|  | 928 |  | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 929 | static enum handoff cdiv_clk_handoff(struct clk *c) | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 930 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 931 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 932 | enum handoff ret; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 933 | u32 reg_val; | 
|  | 934 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 935 | ret = branch_handoff(&cdiv->b, &cdiv->c); | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 936 | if (ret == HANDOFF_DISABLED_CLK) | 
|  | 937 | return ret; | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 938 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 939 | reg_val = readl_relaxed(cdiv->ns_reg); | 
|  | 940 | if (reg_val & cdiv->ext_mask) { | 
|  | 941 | cdiv->cur_div = 0; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 942 | } else { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 943 | reg_val >>= cdiv->div_offset; | 
|  | 944 | cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 945 | } | 
|  | 946 |  | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 947 | return HANDOFF_ENABLED_CLK; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 948 | } | 
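
/*
 * Worked example (same hypothetical layout as above): if the NS register
 * reads 0x00030000 at handoff, with div_offset = 16, max_div = 16 and
 * ext_mask clear in that value, cur_div is recovered as
 * ((0x00030000 >> 16) & 0xf) + 1 = 4, matching the set_rate example;
 * were ext_mask set, cur_div would be recorded as 0 (external input).
 */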
|  | 949 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 950 | static void cdiv_clk_enable_hwcg(struct clk *c) | 
|  | 951 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 952 | branch_enable_hwcg(&to_cdiv_clk(c)->b); | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 953 | } | 
|  | 954 |  | 
|  | 955 | static void cdiv_clk_disable_hwcg(struct clk *c) | 
|  | 956 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 957 | branch_disable_hwcg(&to_cdiv_clk(c)->b); | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 958 | } | 
|  | 959 |  | 
|  | 960 | static int cdiv_clk_in_hwcg_mode(struct clk *c) | 
|  | 961 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame] | 962 | return branch_in_hwcg_mode(&to_cdiv_clk(c)->b); | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 963 | } | 
|  | 964 |  | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 965 | struct clk_ops clk_ops_cdiv = { | 
|  | 966 | .enable = cdiv_clk_enable, | 
|  | 967 | .disable = cdiv_clk_disable, | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 968 | .in_hwcg_mode = cdiv_clk_in_hwcg_mode, | 
|  | 969 | .enable_hwcg = cdiv_clk_enable_hwcg, | 
|  | 970 | .disable_hwcg = cdiv_clk_disable_hwcg, | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 971 | .handoff = cdiv_clk_handoff, | 
|  | 972 | .set_rate = cdiv_clk_set_rate, | 
|  | 973 | .get_rate = cdiv_clk_get_rate, | 
|  | 974 | .list_rate = cdiv_clk_list_rate, | 
|  | 975 | .round_rate = cdiv_clk_round_rate, | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 976 | }; |
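
/*
 * Illustrative sketch only: roughly how a CDIV clock could be described so
 * that clk_ops_cdiv above drives it.  All register macros, bit positions
 * and the "foo" name are hypothetical; real instances live in the
 * SoC-specific clock-*.c files.
 */
static struct cdiv_clk foo_div_clk = {
	.ns_reg = FOO_NS_REG,		/* hypothetical NS register */
	.ext_mask = BIT(14),		/* hypothetical "use external source" bit */
	.div_offset = 16,
	.max_div = 16,
	.b = {
		.ctl_reg = FOO_NS_REG,	/* hypothetical; enable bit shares the NS register */
		.en_mask = BIT(9),
	},
	.c = {
		.dbg_name = "foo_div_clk",
		.ops = &clk_ops_cdiv,
		CLK_INIT(foo_div_clk.c),
	},
};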