/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>

#include <mach/msm_iomap.h>
#include <mach/clk.h>
#include <mach/scm-io.h>

#include "clock.h"
#include "clock-local.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

/*
 * When enabling/disabling a clock, check the halt bit up to this number
 * of times (with a 1 us delay in between) before continuing.
 */
#define HALT_CHECK_MAX_LOOPS	200
/* For clocks without halt checking, wait this long after enables/disables. */
#define HALT_CHECK_DELAY_US	10

DEFINE_SPINLOCK(local_clock_reg_lock);
struct clk_freq_tbl rcg_dummy_freq = F_END;

/*
 * Common Set-Rate Functions
 */

/* For clocks with MND dividers. */
void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	uint32_t ns_reg_val, ctl_reg_val;

	/* Assert MND reset. */
	ns_reg_val = readl_relaxed(rcg->ns_reg);
	ns_reg_val |= BIT(7);
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, rcg->md_reg);

	/* If the clock has a separate CC register, program it. */
	if (rcg->ns_reg != rcg->b.ctl_reg) {
		ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
		ctl_reg_val &= ~(rcg->ctl_mask);
		ctl_reg_val |= nf->ctl_val;
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
	}

	/* Deassert MND reset. */
	ns_reg_val &= ~BIT(7);
	writel_relaxed(ns_reg_val, rcg->ns_reg);
}

void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	/*
	 * Nothing to do for fixed-rate or integer-divider clocks. Any settings
	 * in NS registers are applied in the enable path, since power can be
	 * saved by leaving an un-clocked or slowly-clocked source selected
	 * until the clock is enabled.
	 */
}

void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	uint32_t ctl_reg_val;

	/* Assert MND reset. */
	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
	ctl_reg_val |= BIT(8);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Program M and D values. */
	writel_relaxed(nf->md_val, rcg->md_reg);

	/* Program MN counter Enable and Mode. */
	ctl_reg_val &= ~(rcg->ctl_mask);
	ctl_reg_val |= nf->ctl_val;
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Deassert MND reset. */
	ctl_reg_val &= ~BIT(8);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}

void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = rcg->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, ctl_reg_val;
	uint32_t bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
	bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	ns_reg_val = readl_relaxed(rcg->ns_reg);

	/* Assert bank MND reset. */
	ns_reg_val |= new_bank_masks->rst_mask;
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (rcg->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	writel_relaxed(nf->md_val, new_bank_masks->md_reg);

	/* Enable counter only if clock is enabled. */
	if (rcg->enabled)
		ctl_reg_val |= new_bank_masks->mnd_en_mask;
	else
		ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);

	ctl_reg_val &= ~(new_bank_masks->mode_mask);
	ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
	writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

	/* Deassert bank MND reset. */
	ns_reg_val &= ~(new_bank_masks->rst_mask);
	writel_relaxed(ns_reg_val, rcg->ns_reg);

	/*
	 * Switch to the new bank if clock is running.  If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (rcg->enabled && rcg->current_freq->freq_hz) {
		ctl_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Disable old bank's MN counter. */
		ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
		writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/* Update the MND_EN and NS masks to match the current bank. */
	rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
	rcg->ns_mask = new_bank_masks->ns_mask;
}

void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
	struct bank_masks *banks = rcg->bank_info;
	const struct bank_mask_info *new_bank_masks;
	const struct bank_mask_info *old_bank_masks;
	uint32_t ns_reg_val, bank_sel;

	/*
	 * Determine active bank and program the other one. If the clock is
	 * off, program the active bank since bank switching won't work if
	 * both banks aren't running.
	 */
	ns_reg_val = readl_relaxed(rcg->ns_reg);
	bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
	/* If clock isn't running, don't switch banks. */
	bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
	if (bank_sel == 0) {
		new_bank_masks = &banks->bank1_mask;
		old_bank_masks = &banks->bank0_mask;
	} else {
		new_bank_masks = &banks->bank0_mask;
		old_bank_masks = &banks->bank1_mask;
	}

	/*
	 * Program NS only if the clock is enabled, since the NS will be set
	 * as part of the enable procedure and should remain with a low-power
	 * MUX input selected until then.
	 */
	if (rcg->enabled) {
		ns_reg_val &= ~(new_bank_masks->ns_mask);
		ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/*
	 * Switch to the new bank if clock is running.  If it isn't, then
	 * no switch is necessary since we programmed the active bank.
	 */
	if (rcg->enabled && rcg->current_freq->freq_hz) {
		ns_reg_val ^= banks->bank_sel_mask;
		writel_relaxed(ns_reg_val, rcg->ns_reg);
		/*
		 * Wait at least 6 cycles of slowest bank's clock
		 * for the glitch-free MUX to fully switch sources.
		 */
		mb();
		udelay(1);

		/* Program old bank to a low-power source and divider. */
		ns_reg_val &= ~(old_bank_masks->ns_mask);
		ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
		writel_relaxed(ns_reg_val, rcg->ns_reg);
	}

	/* Update the NS mask to match the current bank. */
	rcg->ns_mask = new_bank_masks->ns_mask;
}

/*
 * Clock enable/disable functions
 */

/* Return non-zero if a clock's status register shows the clock is halted. */
static int branch_clk_is_halted(const struct branch *b)
{
	int invert = (b->halt_check == ENABLE);
	int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
	return invert ? !status_bit : status_bit;
}

static int branch_in_hwcg_mode(const struct branch *b)
{
	if (!b->hwcg_mask)
		return 0;

	return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}

void __branch_clk_enable_reg(const struct branch *b, const char *name)
{
	u32 reg_val;

	if (b->en_mask) {
		reg_val = readl_relaxed(b->ctl_reg);
		reg_val |= b->en_mask;
		writel_relaxed(reg_val, b->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers.  It's also needed in the udelay() case to ensure
	 * the delay starts after the branch enable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(b))
		return;

	/* Wait for clock to enable before returning. */
	if (b->halt_check == DELAY) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (b->halt_check == ENABLE || b->halt_check == HALT
			|| b->halt_check == ENABLE_VOTED
			|| b->halt_check == HALT_VOTED) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
		for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'off'", name);
	}
}

/* Perform any register operations required to enable the clock. */
static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
{
	u32 reg_val;
	void __iomem *const reg = rcg->b.ctl_reg;

	WARN(rcg->current_freq == &rcg_dummy_freq,
		"Attempting to enable %s before setting its rate. "
		"Set the rate first!\n", rcg->c.dbg_name);

	/*
	 * Program the NS register, if applicable. NS registers are not
	 * set in the set_rate path because power can be saved by deferring
	 * the selection of a clocked source until the clock is enabled.
	 */
	if (rcg->ns_mask) {
		reg_val = readl_relaxed(rcg->ns_reg);
		reg_val &= ~(rcg->ns_mask);
		reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
		writel_relaxed(reg_val, rcg->ns_reg);
	}

	/* Enable MN counter, if applicable. */
	reg_val = readl_relaxed(reg);
	if (rcg->current_freq->md_val) {
		reg_val |= rcg->mnd_en_mask;
		writel_relaxed(reg_val, reg);
	}
	/* Enable root. */
	if (rcg->root_en_mask) {
		reg_val |= rcg->root_en_mask;
		writel_relaxed(reg_val, reg);
	}
	__branch_clk_enable_reg(&rcg->b, rcg->c.dbg_name);
}

/* Perform any register operations required to disable the branch. */
u32 __branch_clk_disable_reg(const struct branch *b, const char *name)
{
	u32 reg_val;

	reg_val = readl_relaxed(b->ctl_reg);
	if (b->en_mask) {
		reg_val &= ~(b->en_mask);
		writel_relaxed(reg_val, b->ctl_reg);
	}

	/*
	 * Use a memory barrier since some halt status registers are
	 * not within the same 1K segment as the branch/root enable
	 * registers.  It's also needed in the udelay() case to ensure
	 * the delay starts after the branch disable.
	 */
	mb();

	/* Skip checking halt bit if the clock is in hardware gated mode */
	if (branch_in_hwcg_mode(b))
		return reg_val;

	/* Wait for clock to disable before continuing. */
	if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
				   || b->halt_check == HALT_VOTED) {
		udelay(HALT_CHECK_DELAY_US);
	} else if (b->halt_check == ENABLE || b->halt_check == HALT) {
		int count;

		/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
		for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
					&& count > 0; count--)
			udelay(1);
		WARN(count == 0, "%s status stuck at 'on'", name);
	}

	return reg_val;
}

/* Perform any register operations required to disable the generator. */
static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
{
	void __iomem *const reg = rcg->b.ctl_reg;
	uint32_t reg_val;

	reg_val = __branch_clk_disable_reg(&rcg->b, rcg->c.dbg_name);
	/* Disable root. */
	if (rcg->root_en_mask) {
		reg_val &= ~(rcg->root_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/* Disable MN counter, if applicable. */
	if (rcg->current_freq->md_val) {
		reg_val &= ~(rcg->mnd_en_mask);
		writel_relaxed(reg_val, reg);
	}
	/*
	 * Program NS register to low-power value with an un-clocked or
	 * slowly-clocked source selected.
	 */
	if (rcg->ns_mask) {
		reg_val = readl_relaxed(rcg->ns_reg);
		reg_val &= ~(rcg->ns_mask);
		reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
		writel_relaxed(reg_val, rcg->ns_reg);
	}
}

/* Enable a rate-settable clock. */
static int rcg_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *rcg = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_enable_reg(rcg);
	rcg->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

/* Disable a rate-settable clock. */
static void rcg_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct rcg_clk *rcg = to_rcg_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__rcg_clk_disable_reg(rcg);
	rcg->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

/*
 * Frequency-related functions
 */

/* Set a clock to an exact rate. */
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *nf, *cf;
	struct clk *chld;
	int rc = 0;

	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
			&& nf->freq_hz != rate; nf++)
		;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	cf = rcg->current_freq;

	if (rcg->enabled) {
		/* Enable source clock dependency for the new freq. */
		rc = clk_enable(nf->src_clk);
		if (rc)
			return rc;
	}

	spin_lock(&local_clock_reg_lock);

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (!rcg->bank_info) {
		/* Disable all branches to prevent glitches. */
		list_for_each_entry(chld, &rcg->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			/*
			 * We don't need to grab the child's lock because
			 * we hold the local_clock_reg_lock and 'enabled' is
			 * only modified within lock.
			 */
			if (x->enabled)
				__branch_clk_disable_reg(&x->b, x->c.dbg_name);
		}
		if (rcg->enabled)
			__rcg_clk_disable_reg(rcg);
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!rcg->set_rate);
	rcg->set_rate(rcg, nf);

	/*
	 * Current freq must be updated before __rcg_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	rcg->current_freq = nf;

	/* Enable any clocks that were disabled. */
	if (!rcg->bank_info) {
		if (rcg->enabled)
			__rcg_clk_enable_reg(rcg);
		/* Enable only branches that were ON before. */
		list_for_each_entry(chld, &rcg->c.children, siblings) {
			struct branch_clk *x = to_branch_clk(chld);
			if (x->enabled)
				__branch_clk_enable_reg(&x->b, x->c.dbg_name);
		}
	}

	spin_unlock(&local_clock_reg_lock);

	/* Release source requirements of the old freq. */
	if (rcg->enabled)
		clk_disable(cf->src_clk);

	return rc;
}

/* Check if a clock is currently enabled. */
static int rcg_clk_is_enabled(struct clk *c)
{
	return to_rcg_clk(c)->enabled;
}

/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk_freq_tbl *f;

	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
		if (f->freq_hz >= rate)
			return f->freq_hz;

	return -EPERM;
}

/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
	struct rcg_clk *rcg = to_rcg_clk(c);

	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
		return -ENXIO;

	return (rcg->freq_tbl + n)->freq_hz;
}

static struct clk *rcg_clk_get_parent(struct clk *c)
{
	return to_rcg_clk(c)->current_freq->src_clk;
}

/* Disable hw clock gating if not set at boot */
enum handoff branch_handoff(struct branch *b, struct clk *c)
{
	if (!branch_in_hwcg_mode(b)) {
		b->hwcg_mask = 0;
		c->flags &= ~CLKFLAG_HWCG;
		if (readl_relaxed(b->ctl_reg) & b->en_mask)
			return HANDOFF_ENABLED_CLK;
	} else {
		c->flags |= CLKFLAG_HWCG;
	}
	return HANDOFF_DISABLED_CLK;
}

static enum handoff branch_clk_handoff(struct clk *c)
{
	struct branch_clk *br = to_branch_clk(c);
	return branch_handoff(&br->b, &br->c);
}

static enum handoff rcg_clk_handoff(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	uint32_t ctl_val, ns_val, md_val, ns_mask;
	struct clk_freq_tbl *freq;
	enum handoff ret;

	ctl_val = readl_relaxed(rcg->b.ctl_reg);
	ret = branch_handoff(&rcg->b, &rcg->c);
	if (ret == HANDOFF_DISABLED_CLK)
		return HANDOFF_DISABLED_CLK;

	if (rcg->bank_info) {
		const struct bank_masks *bank_masks = rcg->bank_info;
		const struct bank_mask_info *bank_info;
		if (!(ctl_val & bank_masks->bank_sel_mask))
			bank_info = &bank_masks->bank0_mask;
		else
			bank_info = &bank_masks->bank1_mask;

		ns_mask = bank_info->ns_mask;
		md_val = bank_info->md_reg ?
				readl_relaxed(bank_info->md_reg) : 0;
	} else {
		ns_mask = rcg->ns_mask;
		md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
	}
	if (!ns_mask)
		return HANDOFF_UNKNOWN_RATE;
	ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if ((freq->ns_val & ns_mask) == ns_val &&
		    (!freq->md_val || freq->md_val == md_val))
			break;
	}
	if (freq->freq_hz == FREQ_END)
		return HANDOFF_UNKNOWN_RATE;

	rcg->current_freq = freq;
	c->rate = freq->freq_hz;

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_empty;

struct fixed_clk gnd_clk = {
	.c = {
		.dbg_name = "ground_clk",
		.ops = &clk_ops_empty,
		CLK_INIT(gnd_clk.c),
	},
};

static int branch_clk_enable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *br = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_enable_reg(&br->b, br->c.dbg_name);
	br->enabled = true;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}

static void branch_clk_disable(struct clk *c)
{
	unsigned long flags;
	struct branch_clk *br = to_branch_clk(c);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	__branch_clk_disable_reg(&br->b, br->c.dbg_name);
	br->enabled = false;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static struct clk *branch_clk_get_parent(struct clk *c)
{
	return to_branch_clk(c)->parent;
}

static int branch_clk_is_enabled(struct clk *c)
{
	return to_branch_clk(c)->enabled;
}

static void branch_enable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val |= b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_disable_hwcg(struct branch *b)
{
	unsigned long flags;
	u32 reg_val;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	reg_val = readl_relaxed(b->hwcg_reg);
	reg_val &= ~b->hwcg_mask;
	writel_relaxed(reg_val, b->hwcg_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}

static void branch_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_branch_clk(c)->b);
}

static void branch_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_branch_clk(c)->b);
}

static int branch_set_flags(struct branch *b, unsigned flags)
{
	unsigned long irq_flags;
	u32 reg_val;
	int ret = 0;

	if (!b->retain_reg)
		return -EPERM;

	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
	reg_val = readl_relaxed(b->retain_reg);
	switch (flags) {
	case CLKFLAG_RETAIN:
		reg_val |= b->retain_mask;
		break;
	case CLKFLAG_NORETAIN:
		reg_val &= ~b->retain_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->retain_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);

	return ret;
}

static int branch_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_branch_clk(clk)->b, flags);
}

static int branch_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_branch_clk(c)->b);
}

static void rcg_clk_enable_hwcg(struct clk *c)
{
	branch_enable_hwcg(&to_rcg_clk(c)->b);
}

static void rcg_clk_disable_hwcg(struct clk *c)
{
	branch_disable_hwcg(&to_rcg_clk(c)->b);
}

static int rcg_clk_in_hwcg_mode(struct clk *c)
{
	return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
}

static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
{
	return branch_set_flags(&to_rcg_clk(clk)->b, flags);
}

int branch_reset(struct branch *b, enum clk_reset_action action)
{
	int ret = 0;
	u32 reg_val;
	unsigned long flags;

	if (!b->reset_reg)
		return -EPERM;

	/* Disable hw gating when asserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_ASSERT)
		branch_disable_hwcg(b);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	/* Assert/Deassert reset */
	reg_val = readl_relaxed(b->reset_reg);
	switch (action) {
	case CLK_RESET_ASSERT:
		reg_val |= b->reset_mask;
		break;
	case CLK_RESET_DEASSERT:
		reg_val &= ~b->reset_mask;
		break;
	default:
		ret = -EINVAL;
	}
	writel_relaxed(reg_val, b->reset_reg);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	/* Enable hw gating when deasserting a reset */
	if (b->hwcg_mask && action == CLK_RESET_DEASSERT)
		branch_enable_hwcg(b);
	/* Make sure write is issued before returning. */
	mb();
	return ret;
}

static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
	return branch_reset(&to_branch_clk(c)->b, action);
}

struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.disable = branch_clk_disable,
	.enable_hwcg = branch_clk_enable_hwcg,
	.disable_hwcg = branch_clk_disable_hwcg,
	.in_hwcg_mode = branch_clk_in_hwcg_mode,
	.auto_off = branch_clk_disable,
	.is_enabled = branch_clk_is_enabled,
	.reset = branch_clk_reset,
	.get_parent = branch_clk_get_parent,
	.handoff = branch_clk_handoff,
	.set_flags = branch_clk_set_flags,
};
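
/*
 * Illustrative sketch only (not part of this driver): a branch clock is
 * normally declared in a board- or SoC-specific clock table and pointed at
 * clk_ops_branch. Field names below match struct branch/branch_clk as used
 * in this file; the register macros, bit positions, and masks are
 * hypothetical placeholders, not real MSM registers:
 *
 *	static struct branch_clk example_branch_clk = {
 *		.b = {
 *			.ctl_reg = EXAMPLE_CLK_CTL_REG,	// hypothetical
 *			.en_mask = BIT(4),		// hypothetical
 *			.halt_reg = EXAMPLE_CLK_HALT_REG,
 *			.halt_check = HALT,
 *			.halt_bit = 5,
 *		},
 *		.c = {
 *			.dbg_name = "example_branch_clk",
 *			.ops = &clk_ops_branch,
 *			CLK_INIT(example_branch_clk.c),
 *		},
 *	};
 */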
|  | 815 |  | 
|  | 816 | struct clk_ops clk_ops_reset = { | 
|  | 817 | .reset = branch_clk_reset, | 
|  | 818 | }; | 
|  | 819 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 820 | static int rcg_clk_reset(struct clk *c, enum clk_reset_action action) | 
| Stephen Boyd | 7bf2814 | 2011-12-07 00:30:52 -0800 | [diff] [blame] | 821 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 822 | return branch_reset(&to_rcg_clk(c)->b, action); | 
| Stephen Boyd | 7bf2814 | 2011-12-07 00:30:52 -0800 | [diff] [blame] | 823 | } | 
|  | 824 |  | 
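|  |  | /* Ops table for root clock generators (RCGs): branch-style gating and hw | 
|  |  |  * clock gating plus rate control (set_rate/round_rate/list_rate) driven by | 
|  |  |  * the clock's frequency table. | 
|  |  |  */ | 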
| Stephen Boyd | 409b8b4 | 2012-04-10 12:12:56 -0700 | [diff] [blame] | 825 | struct clk_ops clk_ops_rcg = { | 
|  | 826 | .enable = rcg_clk_enable, | 
|  | 827 | .disable = rcg_clk_disable, | 
|  | 828 | .enable_hwcg = rcg_clk_enable_hwcg, | 
|  | 829 | .disable_hwcg = rcg_clk_disable_hwcg, | 
|  | 830 | .in_hwcg_mode = rcg_clk_in_hwcg_mode, | 
|  | 831 | .auto_off = rcg_clk_disable, | 
|  | 832 | .handoff = rcg_clk_handoff, | 
|  | 833 | .set_rate = rcg_clk_set_rate, | 
|  | 834 | .list_rate = rcg_clk_list_rate, | 
|  | 835 | .is_enabled = rcg_clk_is_enabled, | 
|  | 836 | .round_rate = rcg_clk_round_rate, | 
|  | 837 | .reset = rcg_clk_reset, | 
|  | 838 | .get_parent = rcg_clk_get_parent, | 
|  | 839 | .set_flags = rcg_clk_set_flags, | 
|  | 840 | }; | 
|  | 841 |  | 
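|  |  | /* | 
|  |  |  * CDIV clocks are simple integer dividers.  The "rate" handled by these ops | 
|  |  |  * is the divider value itself: a non-zero value selects a divide-by-rate | 
|  |  |  * setting, while 0 selects the external input via ext_mask. | 
|  |  |  */ | 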
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 842 | static int cdiv_clk_enable(struct clk *c) | 
|  | 843 | { | 
|  | 844 | unsigned long flags; | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 845 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 846 |  | 
|  | 847 | spin_lock_irqsave(&local_clock_reg_lock, flags); | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 848 | __branch_clk_enable_reg(&cdiv->b, cdiv->c.dbg_name); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 849 | spin_unlock_irqrestore(&local_clock_reg_lock, flags); | 
|  | 850 |  | 
|  | 851 | return 0; | 
|  | 852 | } | 
|  | 853 |  | 
|  | 854 | static void cdiv_clk_disable(struct clk *c) | 
|  | 855 | { | 
|  | 856 | unsigned long flags; | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 857 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 858 |  | 
|  | 859 | spin_lock_irqsave(&local_clock_reg_lock, flags); | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 860 | __branch_clk_disable_reg(&cdiv->b, cdiv->c.dbg_name); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 861 | spin_unlock_irqrestore(&local_clock_reg_lock, flags); | 
|  | 862 | } | 
|  | 863 |  | 
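|  |  | /* | 
|  |  |  * The divider is programmed as (divider - 1) in a field starting at | 
|  |  |  * div_offset whose mask is (max_div - 1), which assumes max_div is a power | 
|  |  |  * of two; ext_mask selects the external source instead of the divider. | 
|  |  |  */ | 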
|  | 864 | static int cdiv_clk_set_rate(struct clk *c, unsigned long rate) | 
|  | 865 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 866 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 867 | u32 reg_val; | 
|  |  | unsigned long flags; | 
|  | 868 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 869 | if (rate > cdiv->max_div) | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 870 | return -EINVAL; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 871 |  | 
|  |  | /* Take the lock with IRQs off, matching the other users of this lock. */ | 
|  | 872 | spin_lock_irqsave(&local_clock_reg_lock, flags); | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 873 | reg_val = readl_relaxed(cdiv->ns_reg); | 
|  | 874 | reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 875 | /* Non-zero rates mean set a divider, zero means use external input */ | 
|  | 876 | if (rate) | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 877 | reg_val |= (rate - 1) << cdiv->div_offset; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 878 | else | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 879 | reg_val |= cdiv->ext_mask; | 
|  | 880 | writel_relaxed(reg_val, cdiv->ns_reg); | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 881 | spin_unlock_irqrestore(&local_clock_reg_lock, flags); | 
|  | 882 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 883 | cdiv->cur_div = rate; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 884 | return 0; | 
|  | 885 | } | 
|  | 886 |  | 
|  | 887 | static unsigned long cdiv_clk_get_rate(struct clk *c) | 
|  | 888 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 889 | return to_cdiv_clk(c)->cur_div; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 890 | } | 
|  | 891 |  | 
|  | 892 | static long cdiv_clk_round_rate(struct clk *c, unsigned long rate) | 
|  | 893 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 894 | return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 895 | } | 
|  | 896 |  | 
|  | 897 | static int cdiv_clk_list_rate(struct clk *c, unsigned n) | 
|  | 898 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 899 | return n > to_cdiv_clk(c)->max_div ? -ENXIO : n; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 900 | } | 
|  | 901 |  | 
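|  |  | /* | 
|  |  |  * Handoff: recover the divider the bootloader left programmed so that the | 
|  |  |  * software view of cur_div matches the hardware before the first set_rate. | 
|  |  |  */ | 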
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 902 | static enum handoff cdiv_clk_handoff(struct clk *c) | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 903 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 904 | struct cdiv_clk *cdiv = to_cdiv_clk(c); | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 905 | enum handoff ret; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 906 | u32 reg_val; | 
|  | 907 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 908 | ret = branch_handoff(&cdiv->b, &cdiv->c); | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 909 | if (ret == HANDOFF_DISABLED_CLK) | 
|  | 910 | return ret; | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 911 |  | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 912 | reg_val = readl_relaxed(cdiv->ns_reg); | 
|  | 913 | if (reg_val & cdiv->ext_mask) { | 
|  | 914 | cdiv->cur_div = 0; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 915 | } else { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 916 | reg_val >>= cdiv->div_offset; | 
|  | 917 | cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 918 | } | 
|  | 919 |  | 
| Matt Wagantall | a15833b | 2012-04-03 11:00:56 -0700 | [diff] [blame] | 920 | return HANDOFF_ENABLED_CLK; | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 921 | } | 
|  | 922 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 923 | static void cdiv_clk_enable_hwcg(struct clk *c) | 
|  | 924 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 925 | branch_enable_hwcg(&to_cdiv_clk(c)->b); | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 926 | } | 
|  | 927 |  | 
|  | 928 | static void cdiv_clk_disable_hwcg(struct clk *c) | 
|  | 929 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 930 | branch_disable_hwcg(&to_cdiv_clk(c)->b); | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 931 | } | 
|  | 932 |  | 
|  | 933 | static int cdiv_clk_in_hwcg_mode(struct clk *c) | 
|  | 934 | { | 
| Matt Wagantall | f82f294 | 2012-01-27 13:56:13 -0800 | [diff] [blame^] | 935 | return branch_in_hwcg_mode(&to_cdiv_clk(c)->b); | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 936 | } | 
|  | 937 |  | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 938 | struct clk_ops clk_ops_cdiv = { | 
|  | 939 | .enable = cdiv_clk_enable, | 
|  | 940 | .disable = cdiv_clk_disable, | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 941 | .in_hwcg_mode = cdiv_clk_in_hwcg_mode, | 
|  | 942 | .enable_hwcg = cdiv_clk_enable_hwcg, | 
|  | 943 | .disable_hwcg = cdiv_clk_disable_hwcg, | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 944 | .auto_off = cdiv_clk_disable, | 
|  | 945 | .handoff = cdiv_clk_handoff, | 
|  | 946 | .set_rate = cdiv_clk_set_rate, | 
|  | 947 | .get_rate = cdiv_clk_get_rate, | 
|  | 948 | .list_rate = cdiv_clk_list_rate, | 
|  | 949 | .round_rate = cdiv_clk_round_rate, | 
| Stephen Boyd | b8ad822 | 2011-11-28 12:17:58 -0800 | [diff] [blame] | 950 | }; |
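|  |  |  | 
|  |  | /* | 
|  |  |  * The ops tables above are what SoC clock tables point their struct clk at. | 
|  |  |  * As a rough, hypothetical sketch only (the register define, bit positions, | 
|  |  |  * divider range, .ops field assignment and clock name below are illustrative | 
|  |  |  * assumptions, not taken from this file), a fixed-divider clock using | 
|  |  |  * clk_ops_cdiv might be declared along these lines: | 
|  |  |  * | 
|  |  |  *	static struct cdiv_clk example_div_clk = { | 
|  |  |  *		.ns_reg     = EXAMPLE_NS_REG,	(assumed register define) | 
|  |  |  *		.div_offset = 3,		(assumed field position) | 
|  |  |  *		.max_div    = 16,		(assumed divider range) | 
|  |  |  *		.ext_mask   = BIT(14),		(assumed external-source bit) | 
|  |  |  *		.cur_div    = 1, | 
|  |  |  *		.c = { | 
|  |  |  *			.dbg_name = "example_div_clk", | 
|  |  |  *			.ops      = &clk_ops_cdiv,	(assumed field name) | 
|  |  |  *		}, | 
|  |  |  *	}; | 
|  |  |  * | 
|  |  |  * clk_set_rate(&example_div_clk.c, 4) would then, through the framework's | 
|  |  |  * set_rate hook, reach cdiv_clk_set_rate() and program a divide-by-4, while | 
|  |  |  * clk_set_rate(&example_div_clk.c, 0) would switch to the external input. | 
|  |  |  */ | 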