blob: 8701b672f8ba7990affcd215a6ccac03c1da6e05 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
20
21#include "kgsl.h"
22#include "kgsl_pwrscale.h"
23#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070024#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025
/* Bit indices into kgsl_pwrctrl.power_flags tracking each power domain. */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3

/* Latency figure (presumably ms, for SWFI/pm_qos) — not used in this chunk;
 * TODO confirm against the rest of the file. */
#define GPU_SWFI_LATENCY 3
/* Accumulated busy time (us) after which the sysfs busy stats roll over. */
#define UPDATE_BUSY_VAL 1000000
/* Number of refused naps after which busy stats are force-refreshed. */
#define UPDATE_BUSY 50

/* Associates a clock's devicetree/board name with its KGSL_CLK_* mask bit. */
struct clk_pair {
	const char *name;
	uint map;
};

/*
 * All clocks the GPU block may need.  kgsl_pwrctrl_init() walks this
 * table and clk_get()s each entry whose .map bit is set in the
 * platform data's clk_map.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
62
/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level
 * @device: KGSL device
 * @new_level: index into pwr->pwrlevels to switch to
 *
 * The request is honored only if it is an actual change, does not exceed
 * the thermal cap (thermal_pwrlevel) and stays above the lowest level
 * (num_pwrlevels - 1).  The GPU core clock is stepped one level at a
 * time toward the target to avoid glitches, then the bus vote is
 * updated to match.  Presumably called with the device mutex held (all
 * sysfs callers in this file take it) — TODO confirm.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		int diff = new_level - pwr->active_pwrlevel;
		/* d is the per-iteration step direction: +1 down, -1 up */
		int d = (diff > 0) ? 1 : -1;
		int level = pwr->active_pwrlevel;
		pwr->active_pwrlevel = new_level;
		/* Only touch the core clock if it is running (or napping) */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device,
						KGSL_TIMEOUT_DEFAULT);
			/* Don't shift by more than one level at a time to
			 * avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels[level].gpu_freq);
			}
		}
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; fall back to ebi1 */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
106
107static int __gpuclk_store(int max, struct device *dev,
108 struct device_attribute *attr,
109 const char *buf, size_t count)
110{ int ret, i, delta = 5000000;
111 unsigned long val;
112 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600113 struct kgsl_pwrctrl *pwr;
114
115 if (device == NULL)
116 return 0;
117 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118
119 ret = sscanf(buf, "%ld", &val);
120 if (ret != 1)
121 return count;
122
123 mutex_lock(&device->mutex);
124 for (i = 0; i < pwr->num_pwrlevels; i++) {
125 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
126 if (max)
127 pwr->thermal_pwrlevel = i;
128 break;
129 }
130 }
131
132 if (i == pwr->num_pwrlevels)
133 goto done;
134
135 /*
136 * If the current or requested clock speed is greater than the
137 * thermal limit, bump down immediately.
138 */
139
140 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
141 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
142 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
143 else if (!max)
144 kgsl_pwrctrl_pwrlevel_change(device, i);
145
146done:
147 mutex_unlock(&device->mutex);
148 return count;
149}
150
/* sysfs store for max_gpuclk: sets the thermal frequency cap. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
157
/* sysfs show for max_gpuclk: reports the thermal-cap level's frequency. */
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
170
/* sysfs store for gpuclk: requests a specific GPU clock frequency. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
177
/* sysfs show for gpuclk: reports the active power level's frequency. */
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
190
191static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
192 struct device_attribute *attr,
193 const char *buf, size_t count)
194{
195 char temp[20];
196 unsigned long val;
197 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600198 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700199 int rc;
200
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600201 if (device == NULL)
202 return 0;
203 pwr = &device->pwrctrl;
204
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700205 snprintf(temp, sizeof(temp), "%.*s",
206 (int)min(count, sizeof(temp) - 1), buf);
207 rc = strict_strtoul(temp, 0, &val);
208 if (rc)
209 return rc;
210
211 mutex_lock(&device->mutex);
212
213 if (val == 1)
214 pwr->nap_allowed = true;
215 else if (val == 0)
216 pwr->nap_allowed = false;
217
218 mutex_unlock(&device->mutex);
219
220 return count;
221}
222
/* sysfs show for pwrnap: reports whether napping is currently allowed. */
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
232
233
234static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
235 struct device_attribute *attr,
236 const char *buf, size_t count)
237{
238 char temp[20];
239 unsigned long val;
240 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600241 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242 const long div = 1000/HZ;
243 static unsigned int org_interval_timeout = 1;
244 int rc;
245
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600246 if (device == NULL)
247 return 0;
248 pwr = &device->pwrctrl;
249
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700250 snprintf(temp, sizeof(temp), "%.*s",
251 (int)min(count, sizeof(temp) - 1), buf);
252 rc = strict_strtoul(temp, 0, &val);
253 if (rc)
254 return rc;
255
256 if (org_interval_timeout == 1)
257 org_interval_timeout = pwr->interval_timeout;
258
259 mutex_lock(&device->mutex);
260
261 /* Let the timeout be requested in ms, but convert to jiffies. */
262 val /= div;
263 if (val >= org_interval_timeout)
264 pwr->interval_timeout = val;
265
266 mutex_unlock(&device->mutex);
267
268 return count;
269}
270
/* sysfs show for idle_timer: reports the idle timeout (in jiffies). */
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.interval_timeout);
}
281
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700282static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
283 struct device_attribute *attr,
284 char *buf)
285{
286 int ret;
287 struct kgsl_device *device = kgsl_device_from_dev(dev);
288 struct kgsl_busy *b = &device->pwrctrl.busy;
289 ret = snprintf(buf, 17, "%7d %7d\n",
290 b->on_time_old, b->time_old);
291 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
292 b->on_time_old = 0;
293 b->time_old = 0;
294 }
295 return ret;
296}
297
/* sysfs attribute definitions; pwrnap is group-writable (0664). */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

/* NULL-terminated list consumed by kgsl_create_device_sysfs_files(). */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
315
/* Register all power-control sysfs attributes for @device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
320
/* Remove the power-control sysfs attributes registered at init. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
325
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700326/* Track the amount of time the gpu is on vs the total system time. *
327 * Regularly update the percentage of busy time displayed by sysfs. */
328static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
329{
330 struct kgsl_busy *b = &device->pwrctrl.busy;
331 int elapsed;
332 if (b->start.tv_sec == 0)
333 do_gettimeofday(&(b->start));
334 do_gettimeofday(&(b->stop));
335 elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
336 elapsed += b->stop.tv_usec - b->start.tv_usec;
337 b->time += elapsed;
338 if (on_time)
339 b->on_time += elapsed;
340 /* Update the output regularly and reset the counters. */
341 if ((b->time > UPDATE_BUSY_VAL) ||
342 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
343 b->on_time_old = b->on_time;
344 b->time_old = b->time;
345 b->on_time = 0;
346 b->time = 0;
347 }
348 do_gettimeofday(&(b->start));
349}
350
/*
 * kgsl_pwrctrl_clk - gate or ungate all GPU clocks
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 * @requested_state: target device state; NAP skips the high-latency
 *                   unprepare/rate-drop so wakeup stays fast
 *
 * The CLK_ON bit in power_flags makes this idempotent: the body runs
 * only on an actual off->on or on->off transition.  Ordering is
 * deliberate — clocks are disabled before being unprepared, and
 * prepared/rated before being enabled as the very last step.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Drop to the lowest rate before unprepare */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP)) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
				/* Restore the active level's rate */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
			}

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399
/*
 * kgsl_pwrctrl_axi - vote the AXI/bus bandwidth on or off
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the AXI_ON bit in power_flags.  Two voting paths
 * exist: the msm bus-scale client (pcl) and/or the ebi1 clock; both
 * visible here are updated when present.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate vote, then gate the clock */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Ungate first, then vote the active rate */
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700433
434void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
435{
436 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
437
438 if (state == KGSL_PWRFLAGS_OFF) {
439 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
440 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700441 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700442 if (pwr->gpu_reg)
443 regulator_disable(pwr->gpu_reg);
444 }
445 } else if (state == KGSL_PWRFLAGS_ON) {
446 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
447 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700448 trace_kgsl_rail(device, state);
Shubhraprakash Dase86ba5c2012-04-04 18:03:27 -0600449 if (pwr->gpu_reg) {
450 int status = regulator_enable(pwr->gpu_reg);
451 if (status)
452 KGSL_DRV_ERR(device, "regulator_enable "
453 "failed: %d\n", status);
454 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455 }
456 }
457}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700458
459void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
460{
461 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
462
463 if (state == KGSL_PWRFLAGS_ON) {
464 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
465 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700466 trace_kgsl_irq(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467 enable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700468 }
469 } else if (state == KGSL_PWRFLAGS_OFF) {
470 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
471 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700472 trace_kgsl_irq(device, state);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600473 if (in_interrupt())
474 disable_irq_nosync(pwr->interrupt_num);
475 else
476 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700477 }
478 }
479}
480EXPORT_SYMBOL(kgsl_pwrctrl_irq);
481
/*
 * kgsl_pwrctrl_init - one-time power-control setup from platform data
 * @device: KGSL device whose pwrctrl struct is to be populated
 *
 * Acquires the GPU clocks named in pdata->clk_map, copies the power
 * level table (rounding each frequency through the source clock),
 * grabs the "vdd" regulator and "bus_clk", registers the bus-scale
 * client, and enables runtime PM and early suspend.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error paths the clocks already obtained via
 * clk_get() are not released here — presumably kgsl_pwrctrl_close()
 * is expected to run on failure; confirm against the caller.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	/* Copy the level table; snap each frequency to what the source
	 * clock can actually produce. */
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
		 pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
		 pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* The regulator is optional; a missing "vdd" is not an error. */
	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	/* The bus clock is optional as well. */
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}


	pm_runtime_enable(device->parentdev);
	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
576
/*
 * kgsl_pwrctrl_close - release everything acquired by kgsl_pwrctrl_init
 * @device: KGSL device being torn down
 *
 * Disables runtime PM and early suspend, drops the bus clock, the
 * bus-scale client, the regulator and all group clocks.  grp_clks[0]
 * is only NULLed (not clk_put) because it may alias grp_clks[1]
 * (see the fallback in kgsl_pwrctrl_init).
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
608
/*
 * kgsl_idle_check - deferred-work handler for the idle timer
 * @work: embedded in kgsl_device as idle_check_ws
 *
 * If the GPU is active or napping, consults pwrscale and tries to put
 * the device to sleep; when the GPU is too busy the timer is re-armed
 * and the refused-nap counter feeds the busy statistics.  A device
 * that is hung or being recovered just has its pending state request
 * cleared.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
640
641void kgsl_timer(unsigned long data)
642{
643 struct kgsl_device *device = (struct kgsl_device *) data;
644
645 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +0530646 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -0700647 if (device->pwrctrl.restore_slumber ||
648 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700649 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
650 else
651 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700652 /* Have work run in a non-interrupt context. */
653 queue_work(device->work_queue, &device->idle_check_ws);
654 }
655}
656
/*
 * kgsl_pre_hwaccess - make sure the hardware is awake before touching it
 * @device: KGSL device; its mutex must be held
 *
 * Wakes the device from NAP/SLEEP/SLUMBER, waits out a SUSPEND, and
 * merely logs an error if register access is attempted while the
 * clocks are off in INIT/HUNG/DUMP_AND_RECOVER.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* Tolerable only if the clocks happen to be on already */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
689
/*
 * kgsl_check_suspended - block until the device leaves suspend/recovery
 * @device: KGSL device; called with its mutex held
 *
 * Drops the device mutex while waiting on the relevant completion so
 * the resume/recovery path can make progress, then re-acquires it.
 * A slumbering device is simply woken.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704
/*
 * _nap - transition an idle, active GPU into the NAP state
 * @device: KGSL device; caller holds the device mutex
 *
 * Returns -EBUSY (and clears the requested state) if the GPU is still
 * busy; 0 otherwise.  Already-napping/sleeping states are a no-op.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_mmu_disable_clk(&device->mmu);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
730
/* Close out the busy-time window and reset accounting before sleeping. */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	/* tv_sec == 0 makes kgsl_pwrctrl_busy_time restart its window */
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
739
/*
 * _sleep - transition the GPU into the SLEEP state
 * @device: KGSL device; caller holds the device mutex
 *
 * From ACTIVE the GPU must be idle (else -EBUSY); from NAP the irq/axi
 * are already partially down and the remaining steps run.  Drops the
 * core clock to the lowest level, closes busy accounting, gates the
 * clocks and releases the wakelock/pm_qos vote.  Returns 0 on success.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_mmu_disable_clk(&device->mmu);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
776
/*
 * _slumber - transition the GPU into the deepest SLUMBER state
 * @device: KGSL device; caller holds the device mutex
 *
 * From ACTIVE the GPU must be idle (else -EBUSY, with restore_slumber
 * set so the next idle attempt slumbers again).  Stops the idle timer,
 * suspends the context and stops the hardware entirely, then releases
 * the wakelock/pm_qos vote.  Returns 0 on success.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		/* Start/stop sleep-wake keeps the current level */
		if (!device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_pwrlevel_change(device,
					KGSL_PWRLEVEL_NOMINAL);
		device->pwrctrl.restore_slumber = true;
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700813
814/******************************************************************/
815/* Caller must hold the device mutex. */
816int kgsl_pwrctrl_sleep(struct kgsl_device *device)
817{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700818 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
820
821 /* Work through the legal state transitions */
Jeremy Gebben388c2972011-12-16 09:05:07 -0700822 switch (device->requested_state) {
823 case KGSL_STATE_NAP:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700824 status = _nap(device);
825 break;
826 case KGSL_STATE_SLEEP:
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700827 status = _sleep(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700828 break;
829 case KGSL_STATE_SLUMBER:
830 status = _slumber(device);
831 break;
832 default:
833 KGSL_PWR_INFO(device, "bad state request 0x%x\n",
834 device->requested_state);
835 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
836 status = -EINVAL;
837 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838 }
Suman Tatiraju24569022011-10-27 11:11:12 -0700839 return status;
840}
Jeremy Gebben388c2972011-12-16 09:05:07 -0700841EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700842
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700843/******************************************************************/
844/* Caller must hold the device mutex. */
845void kgsl_pwrctrl_wake(struct kgsl_device *device)
846{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700847 int status;
848 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
849 switch (device->state) {
850 case KGSL_STATE_SLUMBER:
851 status = device->ftbl->start(device, 0);
852 if (status) {
853 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
854 KGSL_DRV_ERR(device, "start failed %d\n", status);
855 break;
856 }
857 /* fall through */
858 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700859 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
860 kgsl_pwrscale_wake(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700861 /* fall through */
862 case KGSL_STATE_NAP:
863 /* Turn on the core clocks */
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -0600864 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700865 /* Enable state before turning on irq */
866 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
867 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
868 /* Re-enable HW access */
869 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700870 jiffies + device->pwrctrl.interval_timeout);
Lucille Sylvester10297892012-02-27 13:54:47 -0700871 wake_lock(&device->idle_wakelock);
Suman Tatiraju3005cdd2012-03-19 14:38:11 -0700872 if (device->pwrctrl.restore_slumber == false)
873 pm_qos_update_request(&device->pm_qos_req_dma,
874 GPU_SWFI_LATENCY);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700875 case KGSL_STATE_ACTIVE:
876 break;
877 default:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700878 KGSL_PWR_WARN(device, "unhandled state %s\n",
879 kgsl_pwrstate_to_str(device->state));
Jeremy Gebben388c2972011-12-16 09:05:07 -0700880 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
881 break;
882 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883}
884EXPORT_SYMBOL(kgsl_pwrctrl_wake);
885
/*
 * Power up the device: rail first, then clocks, then the AXI bus.
 * The ordering is platform-mandated and mirrors kgsl_pwrctrl_disable().
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
894
/*
 * Power down the device in the reverse order of kgsl_pwrctrl_enable():
 * AXI bus, then clocks (including the MMU clock), then the power rail.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_mmu_disable_clk(&device->mmu);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700904
/*
 * Commit a power-state transition: emit the trace event, record the
 * new state, and clear any pending request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	/* Trace before mutating so the event carries the old->new pair. */
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
912
/*
 * Record a desired power state for a later transition (performed by
 * kgsl_pwrctrl_sleep()/kgsl_pwrctrl_wake()). Tracing is skipped for
 * KGSL_STATE_NONE (a request cancellation) and for duplicate requests.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700920
921const char *kgsl_pwrstate_to_str(unsigned int state)
922{
923 switch (state) {
924 case KGSL_STATE_NONE:
925 return "NONE";
926 case KGSL_STATE_INIT:
927 return "INIT";
928 case KGSL_STATE_ACTIVE:
929 return "ACTIVE";
930 case KGSL_STATE_NAP:
931 return "NAP";
932 case KGSL_STATE_SLEEP:
933 return "SLEEP";
934 case KGSL_STATE_SUSPEND:
935 return "SUSPEND";
936 case KGSL_STATE_HUNG:
937 return "HUNG";
938 case KGSL_STATE_DUMP_AND_RECOVER:
939 return "DNR";
940 case KGSL_STATE_SLUMBER:
941 return "SLUMBER";
942 default:
943 break;
944 }
945 return "UNKNOWN";
946}
947EXPORT_SYMBOL(kgsl_pwrstate_to_str);
948