blob: 15a0252055e0d84acf585551ea3ec71bc6449e95 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070020#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
/* Bit indices within kgsl_pwrctrl.power_flags, manipulated with the
 * atomic {test,set,clear}_bit helpers throughout this file. */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3

/* Wake latency hint; presumably in ms for pm_qos voting elsewhere — confirm */
#define GPU_SWFI_LATENCY 3
/* Length (usec) of a busy-statistics window published to sysfs */
#define UPDATE_BUSY_VAL 1000000
/* Force a busy-stats refresh after this many failed sleep attempts */
#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
/* Maps a clock's name (as handed to clk_get()) to its selection bit in
 * the platform data's clk_map field. */
struct clk_pair {
	const char *name;
	uint map;
};

/* Candidate GPU clocks; kgsl_pwrctrl_init() acquires only the ones whose
 * map bit is set in pdata->clk_map.  Index 0 is the rate-setting source. */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
58
/*
 * kgsl_pwrctrl_pwrlevel_change() - move the GPU to a new power level.
 * @device: KGSL device
 * @new_level: index into pwr->pwrlevels
 *
 * The request is ignored unless the level is in range, not hotter than
 * the thermal limit, and different from the current level.  The core
 * clock is stepped one level at a time and the bus vote is updated.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		int diff = new_level - pwr->active_pwrlevel;
		int d = (diff > 0) ? 1 : -1; /* step direction */
		int level = pwr->active_pwrlevel;
		pwr->active_pwrlevel = new_level;
		/* Only touch the clock when it is running (or in NAP,
		 * where it is gated but still prepared). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device,
						KGSL_TIMEOUT_DEFAULT);
			/* Don't shift by more than one level at a time to
			 * avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels[level].gpu_freq);
			}
		}
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; fall back to driving
			 * the EBI clock rate directly. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
102
103static int __gpuclk_store(int max, struct device *dev,
104 struct device_attribute *attr,
105 const char *buf, size_t count)
106{ int ret, i, delta = 5000000;
107 unsigned long val;
108 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600109 struct kgsl_pwrctrl *pwr;
110
111 if (device == NULL)
112 return 0;
113 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700114
115 ret = sscanf(buf, "%ld", &val);
116 if (ret != 1)
117 return count;
118
119 mutex_lock(&device->mutex);
120 for (i = 0; i < pwr->num_pwrlevels; i++) {
121 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
122 if (max)
123 pwr->thermal_pwrlevel = i;
124 break;
125 }
126 }
127
128 if (i == pwr->num_pwrlevels)
129 goto done;
130
131 /*
132 * If the current or requested clock speed is greater than the
133 * thermal limit, bump down immediately.
134 */
135
136 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
137 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
138 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
139 else if (!max)
140 kgsl_pwrctrl_pwrlevel_change(device, i);
141
142done:
143 mutex_unlock(&device->mutex);
144 return count;
145}
146
/* sysfs store for max_gpuclk: sets the thermal ceiling (max=1). */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
153
154static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
155 struct device_attribute *attr,
156 char *buf)
157{
158 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600159 struct kgsl_pwrctrl *pwr;
160 if (device == NULL)
161 return 0;
162 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700163 return snprintf(buf, PAGE_SIZE, "%d\n",
164 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
165}
166
/* sysfs store for gpuclk: requests an immediate level change (max=0). */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
173
174static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
175 struct device_attribute *attr,
176 char *buf)
177{
178 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600179 struct kgsl_pwrctrl *pwr;
180 if (device == NULL)
181 return 0;
182 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700183 return snprintf(buf, PAGE_SIZE, "%d\n",
184 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
185}
186
187static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
188 struct device_attribute *attr,
189 const char *buf, size_t count)
190{
191 char temp[20];
192 unsigned long val;
193 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600194 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195 int rc;
196
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600197 if (device == NULL)
198 return 0;
199 pwr = &device->pwrctrl;
200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700201 snprintf(temp, sizeof(temp), "%.*s",
202 (int)min(count, sizeof(temp) - 1), buf);
203 rc = strict_strtoul(temp, 0, &val);
204 if (rc)
205 return rc;
206
207 mutex_lock(&device->mutex);
208
209 if (val == 1)
210 pwr->nap_allowed = true;
211 else if (val == 0)
212 pwr->nap_allowed = false;
213
214 mutex_unlock(&device->mutex);
215
216 return count;
217}
218
219static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
220 struct device_attribute *attr,
221 char *buf)
222{
223 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600224 if (device == NULL)
225 return 0;
226 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227}
228
229
/*
 * sysfs store for idle_timer: set the GPU idle timeout.  Userspace
 * writes milliseconds; the value is converted to jiffies.  Values below
 * the platform's original timeout are silently ignored.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* NOTE(review): integer ms-per-jiffy; this is 0 if HZ > 1000 and
	 * the division below would then fault — confirm HZ <= 1000. */
	const long div = 1000/HZ;
	/* Latches the platform-provided timeout on first use so userspace
	 * can never lower the timeout beneath it (1 is the sentinel). */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded, NUL-terminated copy of the user buffer. */
	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
266
267static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
268 struct device_attribute *attr,
269 char *buf)
270{
271 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600272 if (device == NULL)
273 return 0;
274 return snprintf(buf, PAGE_SIZE, "%d\n",
275 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276}
277
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700278static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
279 struct device_attribute *attr,
280 char *buf)
281{
282 int ret;
283 struct kgsl_device *device = kgsl_device_from_dev(dev);
284 struct kgsl_busy *b = &device->pwrctrl.busy;
285 ret = snprintf(buf, 17, "%7d %7d\n",
286 b->on_time_old, b->time_old);
287 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
288 b->on_time_old = 0;
289 b->time_old = 0;
290 }
291 return ret;
292}
293
/* sysfs attributes exported under the kgsl device node.  pwrnap is
 * group-writable (0664); the rest are owner-writable (0644). */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

/* NULL-terminated list consumed by init/uninit_sysfs below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
311
/* Create the power-control sysfs files on the kgsl device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
316
/* Remove the power-control sysfs files created by init_sysfs. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
321
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* First sample (or after a sleep reset): start the window now. */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	/* Microseconds elapsed since the previous sample. */
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	/* on_time == true means the GPU was powered for this interval. */
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		/* Publish the window for gpubusy_show and start a new one. */
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	do_gettimeofday(&(b->start));
}
346
/*
 * kgsl_pwrctrl_clk() - gate or ungate the GPU group clocks.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 * @requested_state: target power state; for NAP the clocks are only
 *	disabled (kept prepared) so wake latency stays low.
 *
 * The CLK_ON bit in power_flags makes both paths idempotent.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Going deeper than NAP: drop the source to
				 * the lowest rate and fully unprepare. */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP)) {
				/* Not coming out of NAP: re-prepare and
				 * restore the active rate first. */
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
			}

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395
/*
 * kgsl_pwrctrl_axi() - raise or drop the GPU's AXI/bus bandwidth vote.
 * Guarded by the AXI_ON bit so repeated calls are harmless.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate request, then gate. */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				/* Vote for zero bus bandwidth. */
				msm_bus_scale_client_update_request(pwr->pcl,
								0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Ungate first, then restore the rate for
				 * the active power level. */
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429
430void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
431{
432 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
433
434 if (state == KGSL_PWRFLAGS_OFF) {
435 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
436 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700437 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700438 if (pwr->gpu_reg)
439 regulator_disable(pwr->gpu_reg);
440 }
441 } else if (state == KGSL_PWRFLAGS_ON) {
442 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
443 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700444 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700445 if (pwr->gpu_reg)
446 regulator_enable(pwr->gpu_reg);
447 }
448 }
449}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450
451void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
452{
453 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
454
455 if (state == KGSL_PWRFLAGS_ON) {
456 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
457 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700458 trace_kgsl_irq(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700459 enable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700460 }
461 } else if (state == KGSL_PWRFLAGS_OFF) {
462 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
463 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700464 trace_kgsl_irq(device, state);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600465 if (in_interrupt())
466 disable_irq_nosync(pwr->interrupt_num);
467 else
468 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700469 }
470 }
471}
472EXPORT_SYMBOL(kgsl_pwrctrl_irq);
473
/*
 * kgsl_pwrctrl_init() - one-time power control setup from platform data.
 * Acquires the group clocks, regulator, bus-scale client and IRQ number,
 * and copies/rounds the power level table.  Returns 0 or -errno.
 *
 * NOTE(review): the clk_err/done error paths do not release resources
 * acquired earlier in this function — presumably kgsl_pwrctrl_close()
 * runs on the failure path; confirm callers do that.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	for (i = 0; i < pdata->num_levels; i++) {
		/* Round each requested gpu_freq to a rate the source clock
		 * can produce; 0 means "do not set a rate" (AXI-synced). */
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					pdata->pwrlevel[i].
					gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* The regulator is optional; NULL skips rail control later. */
	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				"msm_bus_scale_register_client failed: "
				"id %d table %p", device->id,
				pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/*acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
					 pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
577
/*
 * kgsl_pwrctrl_close() - release everything kgsl_pwrctrl_init() acquired.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	/* NOTE(review): ebi1_clk may be NULL here; presumably this
	 * platform's clk_put tolerates NULL — confirm. */
	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* grp_clks[0] may alias grp_clks[1] (see init), so slot 0 is only
	 * cleared, never put separately. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
616
/*
 * kgsl_idle_check() - idle-timer work handler.  Tries to move the GPU
 * to the requested low power state; if the GPU is still busy, re-arms
 * the idle timer and updates the busy statistics.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Could not sleep; try again one timeout from now. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery owns the state machine; drop pending requests. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
648
649void kgsl_timer(unsigned long data)
650{
651 struct kgsl_device *device = (struct kgsl_device *) data;
652
653 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +0530654 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -0700655 if (device->pwrctrl.restore_slumber ||
656 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700657 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
658 else
659 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700660 /* Have work run in a non-interrupt context. */
661 queue_work(device->work_queue, &device->idle_check_ws);
662 }
663}
664
/*
 * kgsl_pre_hwaccess() - make sure the hardware can be touched.
 * Wakes the GPU from a low power state or blocks on suspend/recovery.
 * Caller must hold the device mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* These states are only safe if the clocks are already on. */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
697
/*
 * kgsl_check_suspended() - block until an in-flight suspend or recovery
 * completes (dropping and re-taking the device mutex around the wait),
 * or wake the GPU if it is merely slumbering.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
			device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700712
/*
 * _nap() - enter NAP: IRQ off and clocks gated but still prepared, so
 * wake latency stays low.  Returns -EBUSY if the GPU is not idle yet.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		/* fall through - already napping or deeper counts as done */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		/* Cannot nap from this state; drop the pending request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
737
/* Close out the busy-time bookkeeping and notify pwrscale before the
 * GPU goes to sleep. */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	/* Zero start time so the window restarts on the next sample. */
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
746
/*
 * _sleep() - enter SLEEP: IRQ, AXI and clocks off, source clock parked
 * at the lowest rate.  Returns -EBUSY if the GPU is still busy.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		wake_unlock(&device->idle_wakelock);
		/* Release the DMA latency constraint held while active. */
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
782
/*
 * _slumber() - enter the deepest low power state: the GPU core is fully
 * stopped and must be re-started on wake.  Returns -EBUSY if busy.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		/* Unless doing fast start/stop sleep-wake, park the clock
		 * at nominal before shutting the core down. */
		if (!device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_pwrlevel_change(device,
					KGSL_PWRLEVEL_NOMINAL);
		device->pwrctrl.restore_slumber = true;
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819
/******************************************************************/
/* Caller must hold the device mutex. */
/* Dispatch the requested low power transition (NAP/SLEEP/SLUMBER);
 * returns 0 on success, -EBUSY if the GPU is busy, -EINVAL otherwise. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700848
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700849/******************************************************************/
850/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake() - Bring the device back to KGSL_STATE_ACTIVE.
 *
 * The switch deliberately falls through so that each starting state
 * performs its own extra work plus everything the shallower states
 * need: SLUMBER restarts the device, SLEEP re-votes AXI and wakes
 * pwrscale, NAP re-enables clocks/irq and rearms the idle timer.
 * Caller must hold the device mutex.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		/* Device was fully stopped; restart it from scratch */
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
				jiffies + device->pwrctrl.interval_timeout);
		wake_lock(&device->idle_wakelock);
		/*
		 * Tighten the DMA latency vote unless we are on the
		 * slumber path (restore_slumber set) — presumably that
		 * path tolerates the default latency; _slumber() resets
		 * the vote to PM_QOS_DEFAULT_VALUE.
		 */
		if (device->pwrctrl.restore_slumber == false)
			pm_qos_update_request(&device->pm_qos_req_dma,
						GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
891
/*
 * kgsl_pwrctrl_enable() - Power the device up: rail first, then core
 * clocks, then the AXI bus vote.  Mirror image of
 * kgsl_pwrctrl_disable(); do not reorder these calls.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
900
/*
 * kgsl_pwrctrl_disable() - Power the device down in the reverse order
 * of kgsl_pwrctrl_enable(): AXI vote, then clocks, then the power
 * rail.  Do not reorder these calls.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700909
/*
 * kgsl_pwrctrl_set_state() - Commit a power-state transition: emit the
 * trace event, record the new state, and clear any pending request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
917
/*
 * kgsl_pwrctrl_request_state() - Record the state the driver wants to
 * transition to next.  Only genuinely new requests are traced:
 * clearing the request (KGSL_STATE_NONE) and re-requesting the
 * current request are silent.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700925
926const char *kgsl_pwrstate_to_str(unsigned int state)
927{
928 switch (state) {
929 case KGSL_STATE_NONE:
930 return "NONE";
931 case KGSL_STATE_INIT:
932 return "INIT";
933 case KGSL_STATE_ACTIVE:
934 return "ACTIVE";
935 case KGSL_STATE_NAP:
936 return "NAP";
937 case KGSL_STATE_SLEEP:
938 return "SLEEP";
939 case KGSL_STATE_SUSPEND:
940 return "SUSPEND";
941 case KGSL_STATE_HUNG:
942 return "HUNG";
943 case KGSL_STATE_DUMP_AND_RECOVER:
944 return "DNR";
945 case KGSL_STATE_SLUMBER:
946 return "SLUMBER";
947 default:
948 break;
949 }
950 return "UNKNOWN";
951}
952EXPORT_SYMBOL(kgsl_pwrstate_to_str);
953