blob: 7c451a96442d83ae57b683aa0e016645c33d7dc7 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070020#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060022#define KGSL_PWRFLAGS_POWER_ON 0
23#define KGSL_PWRFLAGS_CLK_ON 1
24#define KGSL_PWRFLAGS_AXI_ON 2
25#define KGSL_PWRFLAGS_IRQ_ON 3
26
Lucille Sylvester10297892012-02-27 13:54:47 -070027#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070028#define UPDATE_BUSY_VAL 1000000
29#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -060031struct clk_pair {
32 const char *name;
33 uint map;
34};
35
/* Table of every clock the GPU may need; kgsl_pwrctrl_init() acquires
 * only the entries whose map bit is set in pdata->clk_map.  Index 0
 * (src_clk) is the rate-setting source clock. */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
58
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
60 unsigned int new_level)
61{
62 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
63 if (new_level < (pwr->num_pwrlevels - 1) &&
64 new_level >= pwr->thermal_pwrlevel &&
65 new_level != pwr->active_pwrlevel) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070066 struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067 pwr->active_pwrlevel = new_level;
Lucille Sylvestercd42c822011-09-08 17:37:26 -060068 if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
Kedar Joshic11d0982012-02-07 10:59:49 +053069 (device->state == KGSL_STATE_NAP)) {
70 /*
71 * On some platforms, instability is caused on
72 * changing clock freq when the core is busy.
73 * Idle the gpu core before changing the clock freq.
74 */
75 if (pwr->idle_needed == true)
76 device->ftbl->idle(device,
77 KGSL_TIMEOUT_DEFAULT);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070078 clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
Kedar Joshic11d0982012-02-07 10:59:49 +053079 }
Lucille Sylvester622927a2011-08-10 14:42:25 -060080 if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 if (pwr->pcl)
82 msm_bus_scale_client_update_request(pwr->pcl,
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070083 pwrlevel->bus_freq);
Lucille Sylvester622927a2011-08-10 14:42:25 -060084 else if (pwr->ebi1_clk)
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070085 clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
Lucille Sylvester622927a2011-08-10 14:42:25 -060086 }
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070087 trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
88 pwrlevel->gpu_freq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070089 }
90}
91EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
92
93static int __gpuclk_store(int max, struct device *dev,
94 struct device_attribute *attr,
95 const char *buf, size_t count)
96{ int ret, i, delta = 5000000;
97 unsigned long val;
98 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060099 struct kgsl_pwrctrl *pwr;
100
101 if (device == NULL)
102 return 0;
103 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700104
105 ret = sscanf(buf, "%ld", &val);
106 if (ret != 1)
107 return count;
108
109 mutex_lock(&device->mutex);
110 for (i = 0; i < pwr->num_pwrlevels; i++) {
111 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
112 if (max)
113 pwr->thermal_pwrlevel = i;
114 break;
115 }
116 }
117
118 if (i == pwr->num_pwrlevels)
119 goto done;
120
121 /*
122 * If the current or requested clock speed is greater than the
123 * thermal limit, bump down immediately.
124 */
125
126 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
127 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
128 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
129 else if (!max)
130 kgsl_pwrctrl_pwrlevel_change(device, i);
131
132done:
133 mutex_unlock(&device->mutex);
134 return count;
135}
136
/* sysfs store for max_gpuclk: delegates to the shared handler with the
 * "max" flag set, establishing a thermal frequency ceiling. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
143
144static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
145 struct device_attribute *attr,
146 char *buf)
147{
148 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600149 struct kgsl_pwrctrl *pwr;
150 if (device == NULL)
151 return 0;
152 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153 return snprintf(buf, PAGE_SIZE, "%d\n",
154 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
155}
156
/* sysfs store for gpuclk: delegates to the shared handler without the
 * "max" flag, requesting an immediate power-level change. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
163
164static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
165 struct device_attribute *attr,
166 char *buf)
167{
168 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600169 struct kgsl_pwrctrl *pwr;
170 if (device == NULL)
171 return 0;
172 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700173 return snprintf(buf, PAGE_SIZE, "%d\n",
174 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
175}
176
177static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
178 struct device_attribute *attr,
179 const char *buf, size_t count)
180{
181 char temp[20];
182 unsigned long val;
183 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600184 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185 int rc;
186
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600187 if (device == NULL)
188 return 0;
189 pwr = &device->pwrctrl;
190
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700191 snprintf(temp, sizeof(temp), "%.*s",
192 (int)min(count, sizeof(temp) - 1), buf);
193 rc = strict_strtoul(temp, 0, &val);
194 if (rc)
195 return rc;
196
197 mutex_lock(&device->mutex);
198
199 if (val == 1)
200 pwr->nap_allowed = true;
201 else if (val == 0)
202 pwr->nap_allowed = false;
203
204 mutex_unlock(&device->mutex);
205
206 return count;
207}
208
209static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
210 struct device_attribute *attr,
211 char *buf)
212{
213 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600214 if (device == NULL)
215 return 0;
216 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217}
218
219
220static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
221 struct device_attribute *attr,
222 const char *buf, size_t count)
223{
224 char temp[20];
225 unsigned long val;
226 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600227 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228 const long div = 1000/HZ;
229 static unsigned int org_interval_timeout = 1;
230 int rc;
231
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600232 if (device == NULL)
233 return 0;
234 pwr = &device->pwrctrl;
235
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 snprintf(temp, sizeof(temp), "%.*s",
237 (int)min(count, sizeof(temp) - 1), buf);
238 rc = strict_strtoul(temp, 0, &val);
239 if (rc)
240 return rc;
241
242 if (org_interval_timeout == 1)
243 org_interval_timeout = pwr->interval_timeout;
244
245 mutex_lock(&device->mutex);
246
247 /* Let the timeout be requested in ms, but convert to jiffies. */
248 val /= div;
249 if (val >= org_interval_timeout)
250 pwr->interval_timeout = val;
251
252 mutex_unlock(&device->mutex);
253
254 return count;
255}
256
257static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
258 struct device_attribute *attr,
259 char *buf)
260{
261 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600262 if (device == NULL)
263 return 0;
264 return snprintf(buf, PAGE_SIZE, "%d\n",
265 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266}
267
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700268static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
269 struct device_attribute *attr,
270 char *buf)
271{
272 int ret;
273 struct kgsl_device *device = kgsl_device_from_dev(dev);
274 struct kgsl_busy *b = &device->pwrctrl.busy;
275 ret = snprintf(buf, 17, "%7d %7d\n",
276 b->on_time_old, b->time_old);
277 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
278 b->on_time_old = 0;
279 b->time_old = 0;
280 }
281 return ret;
282}
283
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
285DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
286 kgsl_pwrctrl_max_gpuclk_store);
Praveena Pachipulusu263467d2011-12-22 18:07:16 +0530287DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
289 kgsl_pwrctrl_idle_timer_store);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700290DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
291 NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292
/* NULL-terminated list of the attributes created/removed by the sysfs
 * init/uninit helpers below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
301
/* Create the power-control sysfs attributes for @device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
306
/* Remove the power-control sysfs attributes created at init time. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
311
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700312/* Track the amount of time the gpu is on vs the total system time. *
313 * Regularly update the percentage of busy time displayed by sysfs. */
314static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
315{
316 struct kgsl_busy *b = &device->pwrctrl.busy;
317 int elapsed;
318 if (b->start.tv_sec == 0)
319 do_gettimeofday(&(b->start));
320 do_gettimeofday(&(b->stop));
321 elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
322 elapsed += b->stop.tv_usec - b->start.tv_usec;
323 b->time += elapsed;
324 if (on_time)
325 b->on_time += elapsed;
326 /* Update the output regularly and reset the counters. */
327 if ((b->time > UPDATE_BUSY_VAL) ||
328 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
329 b->on_time_old = b->on_time;
330 b->time_old = b->time;
331 b->on_time = 0;
332 b->time = 0;
333 }
334 do_gettimeofday(&(b->start));
335}
336
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -0600337void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
338 int requested_state)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700339{
340 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
341 int i = 0;
342 if (state == KGSL_PWRFLAGS_OFF) {
343 if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
344 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700345 trace_kgsl_clk(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700346 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
347 if (pwr->grp_clks[i])
348 clk_disable(pwr->grp_clks[i]);
349 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -0600350 (requested_state != KGSL_STATE_NAP))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351 clk_set_rate(pwr->grp_clks[0],
352 pwr->pwrlevels[pwr->num_pwrlevels - 1].
353 gpu_freq);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700354 kgsl_pwrctrl_busy_time(device, true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355 }
356 } else if (state == KGSL_PWRFLAGS_ON) {
357 if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
358 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700359 trace_kgsl_clk(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
361 (device->state != KGSL_STATE_NAP))
362 clk_set_rate(pwr->grp_clks[0],
363 pwr->pwrlevels[pwr->active_pwrlevel].
364 gpu_freq);
365
366 /* as last step, enable grp_clk
367 this is to let GPU interrupt to come */
368 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
369 if (pwr->grp_clks[i])
370 clk_enable(pwr->grp_clks[i]);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700371 kgsl_pwrctrl_busy_time(device, false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372 }
373 }
374}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700375
/*
 * kgsl_pwrctrl_axi - turn the AXI bus path to the GPU on or off.
 *
 * Guarded by the AXI_ON bit so enable/disable stay balanced.  The bus is
 * scaled either through a registered bus-scale client (pcl) or directly
 * via the EBI1 clock; both paths are driven to 0 on the way down.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			/* Drop the rate to 0 before gating the clock. */
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			/* Ungate first, then restore the active level's
			 * bus frequency. */
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409
410void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
411{
412 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
413
414 if (state == KGSL_PWRFLAGS_OFF) {
415 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
416 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700417 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418 if (pwr->gpu_reg)
419 regulator_disable(pwr->gpu_reg);
420 }
421 } else if (state == KGSL_PWRFLAGS_ON) {
422 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
423 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700424 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425 if (pwr->gpu_reg)
426 regulator_enable(pwr->gpu_reg);
427 }
428 }
429}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700430
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line.
 *
 * Balanced via the IRQ_ON bit.  When disabling from interrupt context,
 * disable_irq_nosync() is used because disable_irq() would wait for the
 * running handler (i.e. ourselves) and deadlock.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
453
/*
 * kgsl_pwrctrl_init - probe-time setup of GPU power resources.
 *
 * Acquires clocks named in the platform data, builds the power-level
 * table (rounding requested GPU frequencies through the clock framework),
 * takes the regulator, bus-scale client and IRQ number, and registers the
 * early-suspend handler.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the clk_err path reports the failure but does not
 * clk_put() clocks already acquired in the loop - confirm whether the
 * caller cleans up via kgsl_pwrctrl_close() on probe failure.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	/* Copy each level; GPU frequencies are rounded to rates the source
	 * clock can actually produce, 0 means "don't set_rate". */
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* Regulator is optional: a failed lookup leaves gpu_reg NULL and
	 * rail control becomes a no-op. */
	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	/* The EBI1/bus clock is also optional. */
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
					 bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/*acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
					 pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
556
/*
 * kgsl_pwrctrl_close - release everything acquired by kgsl_pwrctrl_init.
 *
 * Unregisters early-suspend, frees the IRQ, releases the bus clock, the
 * bus-scale client, the regulator, and puts clocks 1..N.  grp_clks[0] is
 * only NULLed, not put: it may alias grp_clks[1] (see init), which was
 * already released in the loop.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
595
/*
 * kgsl_idle_check - deferred-work handler that tries to put the GPU in a
 * lower power state once the idle timer fires.
 *
 * If the GPU is active or napping, gives pwrscale a chance to adjust the
 * level and attempts the requested sleep transition; when the GPU is too
 * busy to sleep, the idle timer is re-armed and the refusal is counted so
 * the busy statistics stay accurate.  HUNG/DUMP_AND_RECOVER states just
 * clear any pending state request.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Sleep refused: try again after another interval. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
627
628void kgsl_timer(unsigned long data)
629{
630 struct kgsl_device *device = (struct kgsl_device *) data;
631
632 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +0530633 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -0700634 if (device->pwrctrl.restore_slumber ||
635 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700636 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
637 else
638 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639 /* Have work run in a non-interrupt context. */
640 queue_work(device->work_queue, &device->idle_check_ws);
641 }
642}
643
/*
 * kgsl_pre_hwaccess - make sure the GPU is powered before touching its
 * registers.  Caller must hold the device mutex.
 *
 * ACTIVE needs nothing; NAP/SLEEP/SLUMBER are woken; SUSPEND blocks until
 * resume.  For INIT/HUNG/DUMP_AND_RECOVER, register access is only legal
 * if the clocks happen to be on - otherwise an error is logged (but the
 * access still proceeds).
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
676
/*
 * kgsl_check_suspended - block the caller while the device is suspended
 * or recovering.  Called with the device mutex held; the mutex is
 * dropped around the wait so the resume/recovery path can take it.
 * A slumbering device is simply woken.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700691
Suman Tatiraju24569022011-10-27 11:11:12 -0700692static int
Jeremy Gebben388c2972011-12-16 09:05:07 -0700693_nap(struct kgsl_device *device)
Suman Tatiraju24569022011-10-27 11:11:12 -0700694{
Suman Tatiraju24569022011-10-27 11:11:12 -0700695 switch (device->state) {
696 case KGSL_STATE_ACTIVE:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700697 if (!device->ftbl->isidle(device)) {
698 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
699 return -EBUSY;
700 }
701 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -0600702 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
703 kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700704 if (device->idle_wakelock.name)
705 wake_unlock(&device->idle_wakelock);
Suman Tatiraju24569022011-10-27 11:11:12 -0700706 case KGSL_STATE_NAP:
707 case KGSL_STATE_SLEEP:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700708 case KGSL_STATE_SLUMBER:
Suman Tatiraju24569022011-10-27 11:11:12 -0700709 break;
710 default:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700711 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Suman Tatiraju24569022011-10-27 11:11:12 -0700712 break;
713 }
Jeremy Gebben388c2972011-12-16 09:05:07 -0700714 return 0;
715}
716
/*
 * _sleep_accounting - close out busy statistics before sleeping: publish
 * the current window, reset the timestamps, and notify pwrscale.
 */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
725
/*
 * _sleep - transition the GPU into SLEEP from ACTIVE or NAP.
 *
 * From ACTIVE the GPU must be idle (else -EBUSY and the request is
 * cleared).  The sequence then gates IRQ and AXI, drops the core clock
 * to the lowest level, closes busy accounting, gates the clocks, sets
 * the state, releases the wakelock and relaxes the pm_qos request.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
761
/*
 * _slumber - transition the GPU into the deepest SLUMBER state.
 *
 * From ACTIVE the GPU must be idle; a busy GPU returns -EBUSY but marks
 * restore_slumber so the next idle attempt slumbers.  The idle timer is
 * stopped, the level is dropped to NOMINAL (unless start/stop sleep-wake
 * is in use), the context is suspended and the hardware stopped, then
 * accounting is closed and the wakelock/pm_qos released.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		if (!device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_pwrlevel_change(device,
					KGSL_PWRLEVEL_NOMINAL);
		device->pwrctrl.restore_slumber = true;
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700798
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - dispatch the pending low-power request
 * (device->requested_state) to the matching transition helper.
 * Returns 0 on success, -EBUSY if the GPU refused to go down, or
 * -EINVAL for an unrecognized request.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700828/******************************************************************/
829/* Caller must hold the device mutex. */
830void kgsl_pwrctrl_wake(struct kgsl_device *device)
831{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700832 int status;
833 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
834 switch (device->state) {
835 case KGSL_STATE_SLUMBER:
836 status = device->ftbl->start(device, 0);
837 if (status) {
838 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
839 KGSL_DRV_ERR(device, "start failed %d\n", status);
840 break;
841 }
842 /* fall through */
843 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
845 kgsl_pwrscale_wake(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700846 /* fall through */
847 case KGSL_STATE_NAP:
848 /* Turn on the core clocks */
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -0600849 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700850 /* Enable state before turning on irq */
851 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
852 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
853 /* Re-enable HW access */
854 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855 jiffies + device->pwrctrl.interval_timeout);
Lucille Sylvester10297892012-02-27 13:54:47 -0700856 wake_lock(&device->idle_wakelock);
Suman Tatiraju3005cdd2012-03-19 14:38:11 -0700857 if (device->pwrctrl.restore_slumber == false)
858 pm_qos_update_request(&device->pm_qos_req_dma,
859 GPU_SWFI_LATENCY);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700860 case KGSL_STATE_ACTIVE:
861 break;
862 default:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700863 KGSL_PWR_WARN(device, "unhandled state %s\n",
864 kgsl_pwrstate_to_str(device->state));
Jeremy Gebben388c2972011-12-16 09:05:07 -0700865 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
866 break;
867 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700868}
869EXPORT_SYMBOL(kgsl_pwrctrl_wake);
870
/* Power the GPU up: rail first, then core clocks, then the AXI bus. */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
879
/* Power the GPU down: mirror of kgsl_pwrctrl_enable in reverse order. */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700888
/* Commit a new power state and clear any pending state request. */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
896
/*
 * Record the desired next power state. Traces only real requests:
 * KGSL_STATE_NONE (a cancellation) and repeats are not traced.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700904
905const char *kgsl_pwrstate_to_str(unsigned int state)
906{
907 switch (state) {
908 case KGSL_STATE_NONE:
909 return "NONE";
910 case KGSL_STATE_INIT:
911 return "INIT";
912 case KGSL_STATE_ACTIVE:
913 return "ACTIVE";
914 case KGSL_STATE_NAP:
915 return "NAP";
916 case KGSL_STATE_SLEEP:
917 return "SLEEP";
918 case KGSL_STATE_SUSPEND:
919 return "SUSPEND";
920 case KGSL_STATE_HUNG:
921 return "HUNG";
922 case KGSL_STATE_DUMP_AND_RECOVER:
923 return "DNR";
924 case KGSL_STATE_SLUMBER:
925 return "SLUMBER";
926 default:
927 break;
928 }
929 return "UNKNOWN";
930}
931EXPORT_SYMBOL(kgsl_pwrstate_to_str);
932