blob: 3a29d71915457b6ec399222f66565c677591bb9c [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070014#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <mach/msm_iomap.h>
16#include <mach/msm_bus.h>
17
18#include "kgsl.h"
19#include "kgsl_pwrscale.h"
20#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070021#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060023#define KGSL_PWRFLAGS_POWER_ON 0
24#define KGSL_PWRFLAGS_CLK_ON 1
25#define KGSL_PWRFLAGS_AXI_ON 2
26#define KGSL_PWRFLAGS_IRQ_ON 3
27
Lucille Sylvester10297892012-02-27 13:54:47 -070028#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070029#define UPDATE_BUSY_VAL 1000000
30#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031
/* Maps a platform clock name to the KGSL_CLK_* bit used in pdata->clk_map. */
struct clk_pair {
	const char *name;
	uint map;
};

/*
 * Clock lookup table consulted by kgsl_pwrctrl_init(): each entry whose
 * map bit is set in the platform data clk_map is acquired with clk_get()
 * under the given name.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
59
/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level
 * @device: KGSL device
 * @new_level: requested index into pwr->pwrlevels
 *
 * The request is honoured only if it lies strictly above the lowest level,
 * at or below the thermal limit (smaller index = faster), and differs from
 * the currently active level.  The core clock rate is only reprogrammed
 * while the clocks are on (or the device is napping); the bus vote is
 * updated to match the new level whenever AXI is on.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		/* Direction of travel: +1 towards slower, -1 towards faster. */
		int diff = new_level - pwr->active_pwrlevel;
		int d = (diff > 0) ? 1 : -1;
		int level = pwr->active_pwrlevel;
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device,
						KGSL_TIMEOUT_DEFAULT);
			/* Don't shift by more than one level at a time to
			 * avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels[level].gpu_freq);
			}
		}
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; fall back to ebi1. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
103
104static int __gpuclk_store(int max, struct device *dev,
105 struct device_attribute *attr,
106 const char *buf, size_t count)
107{ int ret, i, delta = 5000000;
108 unsigned long val;
109 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600110 struct kgsl_pwrctrl *pwr;
111
112 if (device == NULL)
113 return 0;
114 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700115
116 ret = sscanf(buf, "%ld", &val);
117 if (ret != 1)
118 return count;
119
120 mutex_lock(&device->mutex);
121 for (i = 0; i < pwr->num_pwrlevels; i++) {
122 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
123 if (max)
124 pwr->thermal_pwrlevel = i;
125 break;
126 }
127 }
128
129 if (i == pwr->num_pwrlevels)
130 goto done;
131
132 /*
133 * If the current or requested clock speed is greater than the
134 * thermal limit, bump down immediately.
135 */
136
137 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
138 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
139 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
140 else if (!max)
141 kgsl_pwrctrl_pwrlevel_change(device, i);
142
143done:
144 mutex_unlock(&device->mutex);
145 return count;
146}
147
/* sysfs store for max_gpuclk: sets the thermal clock limit. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
154
/* sysfs show for max_gpuclk: reports the thermal-limit frequency in Hz. */
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
167
/* sysfs store for gpuclk: requests a change of the active clock level. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
174
/* sysfs show for gpuclk: reports the active power level's frequency in Hz. */
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
187
/* sysfs store for pwrnap: write 1/0 to allow or forbid the NAP state. */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded copy keeps temp NUL-terminated regardless of count. */
	snprintf(temp, sizeof(temp), "%.*s",
			 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	/* Only 0 and 1 are acted upon; any other value leaves the flag alone. */
	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
219
/* sysfs show for pwrnap: reports whether NAP is currently allowed (1/0). */
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
229
230
231static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
232 struct device_attribute *attr,
233 const char *buf, size_t count)
234{
235 char temp[20];
236 unsigned long val;
237 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600238 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239 const long div = 1000/HZ;
240 static unsigned int org_interval_timeout = 1;
241 int rc;
242
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600243 if (device == NULL)
244 return 0;
245 pwr = &device->pwrctrl;
246
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700247 snprintf(temp, sizeof(temp), "%.*s",
248 (int)min(count, sizeof(temp) - 1), buf);
249 rc = strict_strtoul(temp, 0, &val);
250 if (rc)
251 return rc;
252
253 if (org_interval_timeout == 1)
254 org_interval_timeout = pwr->interval_timeout;
255
256 mutex_lock(&device->mutex);
257
258 /* Let the timeout be requested in ms, but convert to jiffies. */
259 val /= div;
260 if (val >= org_interval_timeout)
261 pwr->interval_timeout = val;
262
263 mutex_unlock(&device->mutex);
264
265 return count;
266}
267
/*
 * sysfs show for idle_timer.
 * NOTE(review): the stored value is reported in jiffies while the store
 * path accepts milliseconds — confirm whether this asymmetry is intended.
 */
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.interval_timeout);
}
278
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700279static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
280 struct device_attribute *attr,
281 char *buf)
282{
283 int ret;
284 struct kgsl_device *device = kgsl_device_from_dev(dev);
285 struct kgsl_busy *b = &device->pwrctrl.busy;
286 ret = snprintf(buf, 17, "%7d %7d\n",
287 b->on_time_old, b->time_old);
288 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
289 b->on_time_old = 0;
290 b->time_old = 0;
291 }
292 return ret;
293}
294
/* sysfs attributes exposed under the KGSL device node. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

/* NULL-terminated list handed to kgsl_create_device_sysfs_files(). */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
312
/* Create the power-control sysfs files for @device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
317
/* Remove the power-control sysfs files created by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
322
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* First invocation: seed the window start time. */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	/* Wall-clock microseconds since the previous sample. */
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	do_gettimeofday(&(b->start));
}
347
/*
 * kgsl_pwrctrl_clk - gate the GPU group clocks on or off
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 * @requested_state: target device state; when it is KGSL_STATE_NAP the
 *                   clocks stay prepared (only clk_disable) so the next
 *                   wake is fast, otherwise they are fully unprepared
 *                   and the rate is dropped to the lowest level.
 *
 * The CLK_ON bit in power_flags makes on/off idempotent.  Busy-time
 * accounting is updated on every real transition.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* Index 0 is the source clock alias; skip it. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP)) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
			}

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700396
/*
 * kgsl_pwrctrl_axi - turn the AXI bus vote/clock on or off
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Guarded by the AXI_ON bit in power_flags so repeated calls are no-ops.
 * Uses the ebi1 clock and/or the msm_bus scaling client, whichever the
 * target provides.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate vote before gating. */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700430
/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power rail regulator on or off
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the POWER_ON bit in power_flags.
 * NOTE(review): the regulator_enable() return value is ignored here —
 * confirm whether a failed enable needs handling on this platform.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the IRQ_ON bit.  When called from interrupt context the
 * non-syncing disable is used, since disable_irq() would wait for the
 * running handler (i.e. ourselves) to finish.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
474
/*
 * kgsl_pwrctrl_init - acquire clocks, regulator and bus client for a device
 * @device: KGSL device whose pwrctrl state is being populated
 *
 * Reads the platform data attached to the parent platform device, acquires
 * every clock named in clk_map, builds the pwrlevel table (rounding each
 * frequency through clk_round_rate), grabs the "vdd" regulator and the
 * "bus_clk" clock if present, and registers the msm bus-scale client.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error paths (invalid level count, bus-scale
 * registration failure) the already-acquired clocks and regulator are not
 * released here — confirm the caller tears them down via
 * kgsl_pwrctrl_close() on failure.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	/* Round each requested frequency to what the clock can deliver. */
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* The regulator is optional; NULL means "no rail to manage". */
	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
					 bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}


	pm_runtime_enable(device->parentdev);
	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
569
/*
 * kgsl_pwrctrl_close - release everything acquired by kgsl_pwrctrl_init()
 * @device: KGSL device being torn down
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/*
	 * Slot 0 may merely alias slot 1 (set up in kgsl_pwrctrl_init),
	 * so only put clocks from index 1 upward and clear the alias.
	 */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
601
/*
 * kgsl_idle_check - workqueue handler run when the idle timer expires
 *
 * Attempts to move an ACTIVE/NAP device into the requested sleep state via
 * kgsl_pwrctrl_sleep().  If the GPU is too busy to sleep the idle timer is
 * re-armed and the failed attempt is folded into the busy statistics.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Could not sleep: try again one timeout from now. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery in progress: drop any pending state request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
633
/*
 * kgsl_timer - idle timer callback (runs in timer/interrupt context)
 * @data: the kgsl_device, cast to unsigned long
 *
 * Requests SLUMBER or SLEEP depending on configuration, then defers the
 * actual transition to kgsl_idle_check() on the device workqueue, since
 * it needs the device mutex.
 */
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.restore_slumber ||
			device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}
649
/*
 * kgsl_pre_hwaccess - ensure the GPU is powered before touching registers
 *
 * Must be called with the device mutex held.  Wakes or un-suspends the
 * device as needed; for INIT/HUNG/RECOVER states it only verifies that
 * the clocks are on, logging an error otherwise.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
682
/*
 * kgsl_check_suspended - block until a suspend or recovery completes
 *
 * Called with the device mutex held; the mutex is dropped while waiting
 * on the relevant completion gate, then re-taken.  A slumbering device
 * is simply woken instead.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700697
/*
 * _nap - transition the device into the low-latency NAP state
 *
 * From ACTIVE: requires the GPU to be idle, then gates the IRQ and clocks
 * (keeping them prepared for a fast wake).  Returns -EBUSY if the GPU is
 * still busy; 0 otherwise.  Already-sleeping states are left alone.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
722
/* Close out busy-time accounting and notify pwrscale before sleeping. */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	/* Reset the window start so the next sample re-seeds it. */
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
731
/*
 * _sleep - transition the device into the SLEEP state
 *
 * From ACTIVE (must be idle, else -EBUSY) or NAP: gates IRQ, AXI and
 * clocks, drops the core clock to the lowest level, closes the busy
 * accounting window and releases the idle wakelock / PM QoS vote.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
767
/*
 * _slumber - transition the device into the deepest SLUMBER state
 *
 * From ACTIVE (must be idle, else -EBUSY), NAP or SLEEP: cancels the idle
 * timer, optionally drops to the nominal power level, suspends the context
 * and stops the core entirely, then closes accounting and releases the
 * wakelock / PM QoS vote.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		if (!device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_pwrlevel_change(device,
					KGSL_PWRLEVEL_NOMINAL);
		device->pwrctrl.restore_slumber = true;
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700804
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - dispatch to the handler for the requested sleep
 * state (NAP, SLEEP or SLUMBER).  Returns 0 on success, -EBUSY if the
 * GPU was not idle, -EINVAL for an unexpected request.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700834/******************************************************************/
835/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake() - bring the device back to ACTIVE from any
 * low-power state.  The cases deliberately fall through so each
 * deeper state performs its own extra wake-up work and then the
 * shallower states' work as well.
 *
 * Caller must hold the device mutex.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		/* Core was fully stopped in slumber; restart it first. */
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
				jiffies + device->pwrctrl.interval_timeout);
		wake_lock(&device->idle_wakelock);
		/*
		 * NOTE(review): the DMA latency request is presumably
		 * skipped when a return to slumber is already pending —
		 * confirm the intent.
		 */
		if (device->pwrctrl.restore_slumber == false)
			pm_qos_update_request(&device->pm_qos_req_dma,
						GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
876
/*
 * kgsl_pwrctrl_enable() - power the device fully up: rail first,
 * then core clocks, then the AXI bus.  kgsl_pwrctrl_disable() tears
 * these down in the reverse order.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
885
/*
 * kgsl_pwrctrl_disable() - power the device fully down, releasing
 * the AXI bus, core clocks and power rail in the reverse of the
 * order kgsl_pwrctrl_enable() acquired them.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700894
/*
 * kgsl_pwrctrl_set_state() - commit a power-state transition: emit a
 * trace event, record the new state, and clear any pending request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
902
/*
 * kgsl_pwrctrl_request_state() - record the power state the driver
 * should transition to next.  Only meaningful changes are traced:
 * cancellations (KGSL_STATE_NONE) and repeated requests for the
 * already-requested state are not.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700910
911const char *kgsl_pwrstate_to_str(unsigned int state)
912{
913 switch (state) {
914 case KGSL_STATE_NONE:
915 return "NONE";
916 case KGSL_STATE_INIT:
917 return "INIT";
918 case KGSL_STATE_ACTIVE:
919 return "ACTIVE";
920 case KGSL_STATE_NAP:
921 return "NAP";
922 case KGSL_STATE_SLEEP:
923 return "SLEEP";
924 case KGSL_STATE_SUSPEND:
925 return "SUSPEND";
926 case KGSL_STATE_HUNG:
927 return "HUNG";
928 case KGSL_STATE_DUMP_AND_RECOVER:
929 return "DNR";
930 case KGSL_STATE_SLUMBER:
931 return "SLUMBER";
932 default:
933 break;
934 }
935 return "UNKNOWN";
936}
937EXPORT_SYMBOL(kgsl_pwrstate_to_str);
938