/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <linux/pm_runtime.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"

#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3

#define GPU_SWFI_LATENCY	3
#define UPDATE_BUSY_VAL		1000000
#define UPDATE_BUSY		50

struct clk_pair {
	const char *name;
	uint map;
};

struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};

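/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to new_level if it is in
 * range, no hotter than the thermal limit, and different from the
 * current level. The core clock is stepped one level at a time and the
 * bus vote is updated to match the new level.
 */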
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		int diff = new_level - pwr->active_pwrlevel;
		int d = (diff > 0) ? 1 : -1;
		int level = pwr->active_pwrlevel;
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * Some platforms become unstable if the clock
			 * frequency is changed while the core is busy,
			 * so idle the GPU before changing the clock.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device);

			/*
			 * Don't shift by more than one level at a time
			 * to avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[level].gpu_freq);
			}
		}
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);

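/*
 * __gpuclk_store - common handler for the gpuclk and max_gpuclk sysfs
 * files. Matches the written frequency against the power-level table
 * (within 5 MHz); for max_gpuclk the matching level becomes the thermal
 * ceiling, otherwise the active power level is changed directly.
 */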
static int __gpuclk_store(int max, struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int ret, i, delta = 5000000;
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%ld", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
			if (max)
				pwr->thermal_pwrlevel = i;
			break;
		}
	}

	if (i == (pwr->num_pwrlevels - 1))
		goto done;

	/*
	 * If the current or requested clock speed is greater than the
	 * thermal limit, bump down immediately.
	 */

	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
	else if (!max)
		kgsl_pwrctrl_pwrlevel_change(device, i);

done:
	mutex_unlock(&device->mutex);
	return count;
}

static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}

static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}

static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}

static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}


static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			device->pwrctrl.interval_timeout);
}

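/*
 * kgsl_pwrctrl_gpubusy_show - report "<on-time> <total-time>" (in usec)
 * for the last sampling window; the window counters are zeroed after a
 * read taken while the AXI bus is off.
 */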
static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	int ret;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_busy *b;
	if (device == NULL)
		return 0;
	b = &device->pwrctrl.busy;
	ret = snprintf(buf, 17, "%7d %7d\n",
		       b->on_time_old, b->time_old);
	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = 0;
		b->time_old = 0;
	}
	return ret;
}

DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

/*
 * Track the amount of time the gpu is on vs the total system time.
 * Regularly update the percentage of busy time displayed by sysfs.
 */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
	    !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	do_gettimeofday(&(b->start));
}

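/*
 * kgsl_pwrctrl_clk - gate the GPU core clocks on or off. Except when
 * heading into or out of NAP (which must resume quickly), the core
 * clock is also dropped to the lowest rate and the clocks unprepared
 * on the way down, with the reverse done on the way back up.
 */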
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					  int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP)) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
			}

			/*
			 * As the last step, enable grp_clk so that the
			 * GPU interrupt can come through.
			 */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}

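/*
 * kgsl_pwrctrl_axi - vote the GPU's AXI bus bandwidth on or off, either
 * through the bus-scale client or by scaling the ebi1 clock directly.
 */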
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}

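/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power rails. The core (gpu_reg)
 * and cx (gpu_cx) regulators are disabled together and re-enabled with
 * an error logged if either enable fails.
 */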
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
						"core regulator_enable "
						"failed: %d\n",
						status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
						"cx regulator_enable "
						"failed: %d\n",
						status);
			}
		}
	}
}

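/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line, using
 * the nosync variant when called from interrupt context.
 */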
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

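/*
 * kgsl_pwrctrl_init - one-time power setup from platform data: acquire
 * the named clocks, build the power-level table, grab the regulators
 * and bus-scale client, and enable runtime PM and early suspend.
 */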
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/* Acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* Put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			     pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				       pdata->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	if (pwr->gpu_reg) {
		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
		if (IS_ERR(pwr->gpu_cx))
			pwr->gpu_cx = NULL;
	} else
		pwr->gpu_cx = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			     pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							 bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				     "msm_bus_scale_register_client failed: "
				     "id %d table %p", device->id,
				     pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	pm_runtime_enable(device->parentdev);
	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		     clks[i].name, result);

done:
	return result;
}

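/*
 * kgsl_pwrctrl_close - undo kgsl_pwrctrl_init: release the bus client,
 * regulators, and clocks, and disable runtime PM and early suspend.
 */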
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}

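/*
 * kgsl_idle_check - workqueue handler run when the idle timer fires.
 * Tries to put an idle GPU to sleep; if the GPU is still busy, the
 * timer is re-armed and the busy statistics are updated.
 */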
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
						  idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device, 0);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
			/*
			 * If the GPU has been too busy to sleep, make sure
			 * that is accurately reflected in the % busy numbers.
			 */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
				    KGSL_STATE_DUMP_AND_RECOVER)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}

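/*
 * kgsl_timer - idle timer callback. Requests SLEEP (or SLUMBER, when
 * configured) and defers the actual transition to the idle-check work
 * item so it runs outside interrupt context.
 */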
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.restore_slumber ||
		    device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

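/*
 * kgsl_pre_hwaccess - make sure the hardware is awake before touching
 * registers: wake from NAP/SLEEP/SLUMBER, block on SUSPEND, and log an
 * error if register access is attempted while the clocks are off.
 */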
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
			     &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
				"hw access while clocks off from state %d\n",
				device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
			     device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);

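/*
 * kgsl_check_suspended - block until a pending suspend or recovery has
 * finished, dropping the device mutex while waiting; wake the GPU if it
 * was left in SLUMBER.
 */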
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
	    device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}

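/*
 * The _nap/_sleep/_slumber helpers below implement the individual
 * low-power transitions for kgsl_pwrctrl_sleep. Each one refuses to
 * leave ACTIVE while the hardware is busy and treats already-reached
 * low-power states as a no-op.
 */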
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}

static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}

static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				     pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pm_qos_req_dma,
				      PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}

static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pm_qos_req_dma,
				      PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
			      device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
			  jiffies + device->pwrctrl.interval_timeout);
		if (device->pwrctrl.restore_slumber == false)
			pm_qos_update_request(&device->pm_qos_req_dma,
					      GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);

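/*
 * kgsl_pwrctrl_set_state commits a power-state change and clears any
 * pending request; kgsl_pwrctrl_request_state only records the desired
 * state so a later transition (e.g. from the idle check) can apply it.
 * Non-trivial requests and all committed changes are traced.
 */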
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);

void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);

const char *kgsl_pwrstate_to_str(unsigned int state)
{
	switch (state) {
	case KGSL_STATE_NONE:
		return "NONE";
	case KGSL_STATE_INIT:
		return "INIT";
	case KGSL_STATE_ACTIVE:
		return "ACTIVE";
	case KGSL_STATE_NAP:
		return "NAP";
	case KGSL_STATE_SLEEP:
		return "SLEEP";
	case KGSL_STATE_SUSPEND:
		return "SUSPEND";
	case KGSL_STATE_HUNG:
		return "HUNG";
	case KGSL_STATE_DUMP_AND_RECOVER:
		return "DNR";
	case KGSL_STATE_SLUMBER:
		return "SLUMBER";
	default:
		break;
	}
	return "UNKNOWN";
}
EXPORT_SYMBOL(kgsl_pwrstate_to_str);