/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <mach/clk.h>

#include "rpm_resources.h"
#include "clock.h"
#include "clock-rpm.h"

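/*
 * Helpers that route a rate/enable request through the target-specific
 * rpmrs_data ops, selecting the active or sleep context and, optionally,
 * the noirq variant of the underlying RPM call.
 */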
#define __clk_rpmrs_set_rate(r, value, ctx, noirq) \
	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx), (noirq)))

#define clk_rpmrs_set_rate_sleep(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id, 0)

#define clk_rpmrs_set_rate_sleep_noirq(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id, 1)

#define clk_rpmrs_set_rate_active(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id, 0)

#define clk_rpmrs_set_rate_active_noirq(r, value) \
	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id, 1)

static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value,
			uint32_t context, int noirq)
{
	struct msm_rpm_iv_pair iv = {
		.id = r->rpm_clk_id,
		.value = value,
	};
	if (noirq)
		return msm_rpmrs_set_noirq(context, &iv, 1);
	else
		return msm_rpmrs_set(context, &iv, 1);
}

static int clk_rpmrs_get_rate(struct rpm_clk *r)
{
	int rc;
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	rc = msm_rpm_get_status(&iv, 1);
	return (rc < 0) ? rc : iv.value * r->factor;
}

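/*
 * Keys used by the RPM-over-SMD message format: the ASCII strings "KHz"
 * and "Enab" packed into little-endian 32-bit key IDs.
 */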
#define RPM_SMD_KEY_RATE	0x007A484B
#define RPM_SMD_KEY_ENABLE	0x62616E45

static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
			uint32_t context, int noirq)
{
	u32 rpm_key = r->branch ? RPM_SMD_KEY_ENABLE : RPM_SMD_KEY_RATE;
	struct msm_rpm_kvp kvp = {
		.key = rpm_key,
		.data = (void *)&value,
		.length = sizeof(value),
	};

	if (noirq)
		return msm_rpm_send_message_noirq(context,
				r->rpm_res_type, r->rpm_clk_id, &kvp, 1);
	else
		return msm_rpm_send_message(context, r->rpm_res_type,
				r->rpm_clk_id, &kvp, 1);
}

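/*
 * Per-target RPM transport: how rate/enable votes are sent (and optionally
 * read back), plus the IDs of the active and sleep contexts to vote in.
 */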
struct clk_rpmrs_data {
	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value,
				uint32_t context, int noirq);
	int (*get_rate_fn)(struct rpm_clk *r);
	int ctx_active_id;
	int ctx_sleep_id;
};

struct clk_rpmrs_data clk_rpmrs_data = {
	.set_rate_fn = clk_rpmrs_set_rate,
	.get_rate_fn = clk_rpmrs_get_rate,
	.ctx_active_id = MSM_RPM_CTX_SET_0,
	.ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP,
};

struct clk_rpmrs_data clk_rpmrs_data_smd = {
	.set_rate_fn = clk_rpmrs_set_rate_smd,
	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
};

static DEFINE_SPINLOCK(rpm_clock_lock);

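/*
 * Each RPM clock is paired with a 'peer' handle for the same underlying
 * RPM resource. Requests aggregate the two by voting max(this, peer) into
 * the active set and, separately, into the sleep set; branch clocks vote
 * a 1/0 enable value instead of a rate.
 */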
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	value = max(this_khz, peer_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active_noirq(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

static void rpm_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	if (r->last_set_khz) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		value = r->branch ? !!peer_khz : peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
		if (rc)
			goto out;

		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	}
	r->enabled = false;
out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}

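/*
 * rate is requested in Hz; r->factor converts it to the units the RPM
 * expects (e.g. kHz). The vote is only sent while the clock is enabled;
 * otherwise the request is just cached and sent by rpm_clk_enable() later.
 */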
static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	this_khz = DIV_ROUND_UP(rate, r->factor);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero. */
	if (r->active_only)
		this_sleep_khz = 0;
	else
		this_sleep_khz = this_khz;

	if (r->enabled) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		value = max(this_khz, peer_khz);
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
		if (rc)
			goto out;

		value = max(this_sleep_khz, peer_sleep_khz);
		rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	}
	if (!rc) {
		r->last_set_khz = this_khz;
		r->last_set_sleep_khz = this_sleep_khz;
	}

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

static unsigned long rpm_clk_get_rate(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	if (r->rpmrs_data->get_rate_fn)
		return r->rpmrs_data->get_rate_fn(r);
	else
		return clk->rate;
}

static int rpm_clk_is_enabled(struct clk *clk)
{
	return !!(rpm_clk_get_rate(clk));
}

static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
{
	/* Not supported. */
	return rate;
}

static bool rpm_clk_is_local(struct clk *clk)
{
	return false;
}

static enum handoff rpm_clk_handoff(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	struct msm_rpm_iv_pair iv = { r->rpm_status_id };
	int rc;

	/*
	 * Querying an RPM clock's status will return 0 unless the clock's
	 * rate has previously been set through the RPM. When handing off,
	 * assume these clocks are enabled (unless the RPM call fails) so
	 * child clocks of these RPM clocks can still be handed off.
	 */
	rc = msm_rpm_get_status(&iv, 1);
	if (rc < 0)
		return HANDOFF_DISABLED_CLK;

	if (!r->branch) {
		r->last_set_khz = iv.value;
		r->last_set_sleep_khz = iv.value;
		clk->rate = iv.value * r->factor;
	}

	return HANDOFF_ENABLED_CLK;
}

struct clk_ops clk_ops_rpm = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.set_rate = rpm_clk_set_rate,
	.get_rate = rpm_clk_get_rate,
	.is_enabled = rpm_clk_is_enabled,
	.round_rate = rpm_clk_round_rate,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};

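/*
 * Branch clocks only support on/off votes, so they omit the rate ops.
 */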
struct clk_ops clk_ops_rpm_branch = {
	.enable = rpm_clk_enable,
	.disable = rpm_clk_disable,
	.is_local = rpm_clk_is_local,
	.handoff = rpm_clk_handoff,
};