blob: f3fa797cf9a4711286c69a2d355bfb1ace46b0c5 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
20
21#define GPU_SWFI_LATENCY 3
22
/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level.
 * @device: KGSL device whose clocks/bus votes are updated
 * @new_level: index into pwr->pwrlevels (0 is the fastest level)
 *
 * The request is ignored unless the level is in range, is not hotter
 * than the current thermal limit (thermal_pwrlevel), and actually
 * differs from the active level.  Caller is expected to hold the
 * device mutex; this function does no locking of its own.
 *
 * NOTE(review): the range check uses num_pwrlevels - 1, so the very
 * last (slowest) level can never be selected here — presumably it is
 * reserved for the sleep path; confirm against pwrlevel table users.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		/* Reprogram the core clock only while it is running */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].
				gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Bus vote: prefer the bus-scale client, fall
			 * back to direct ebi1 clock rate control. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
			      pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
50
51static int __gpuclk_store(int max, struct device *dev,
52 struct device_attribute *attr,
53 const char *buf, size_t count)
54{ int ret, i, delta = 5000000;
55 unsigned long val;
56 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060057 struct kgsl_pwrctrl *pwr;
58
59 if (device == NULL)
60 return 0;
61 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
63 ret = sscanf(buf, "%ld", &val);
64 if (ret != 1)
65 return count;
66
67 mutex_lock(&device->mutex);
68 for (i = 0; i < pwr->num_pwrlevels; i++) {
69 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
70 if (max)
71 pwr->thermal_pwrlevel = i;
72 break;
73 }
74 }
75
76 if (i == pwr->num_pwrlevels)
77 goto done;
78
79 /*
80 * If the current or requested clock speed is greater than the
81 * thermal limit, bump down immediately.
82 */
83
84 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
85 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
86 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
87 else if (!max)
88 kgsl_pwrctrl_pwrlevel_change(device, i);
89
90done:
91 mutex_unlock(&device->mutex);
92 return count;
93}
94
/* sysfs store for max_gpuclk: sets the thermal clock ceiling (max=1). */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
101
102static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
103 struct device_attribute *attr,
104 char *buf)
105{
106 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600107 struct kgsl_pwrctrl *pwr;
108 if (device == NULL)
109 return 0;
110 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700111 return snprintf(buf, PAGE_SIZE, "%d\n",
112 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
113}
114
/* sysfs store for gpuclk: request an active clock level (max=0). */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
121
122static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
123 struct device_attribute *attr,
124 char *buf)
125{
126 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600127 struct kgsl_pwrctrl *pwr;
128 if (device == NULL)
129 return 0;
130 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131 return snprintf(buf, PAGE_SIZE, "%d\n",
132 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
133}
134
135static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
136 struct device_attribute *attr,
137 const char *buf, size_t count)
138{
139 char temp[20];
140 unsigned long val;
141 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600142 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700143 int rc;
144
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600145 if (device == NULL)
146 return 0;
147 pwr = &device->pwrctrl;
148
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 snprintf(temp, sizeof(temp), "%.*s",
150 (int)min(count, sizeof(temp) - 1), buf);
151 rc = strict_strtoul(temp, 0, &val);
152 if (rc)
153 return rc;
154
155 mutex_lock(&device->mutex);
156
157 if (val == 1)
158 pwr->nap_allowed = true;
159 else if (val == 0)
160 pwr->nap_allowed = false;
161
162 mutex_unlock(&device->mutex);
163
164 return count;
165}
166
167static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
168 struct device_attribute *attr,
169 char *buf)
170{
171 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600172 if (device == NULL)
173 return 0;
174 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175}
176
177
/*
 * sysfs store for idle_timer: set the idle timeout.  Input is in
 * milliseconds; it is converted to jiffies before being stored in
 * pwr->interval_timeout.  Values below the original platform default
 * are rejected (silently ignored).
 *
 * NOTE(review): div = 1000/HZ is integer division; on a kernel with
 * HZ > 1000 this is 0 and "val /= div" divides by zero — confirm the
 * supported HZ range or convert via msecs_to_jiffies().
 * NOTE(review): org_interval_timeout uses 1 as an "unset" sentinel,
 * which breaks if the platform default timeout is itself 1 jiffy.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;	/* ms per jiffy */
	/* Remember the platform's original timeout as a lower bound;
	 * captured once on the first successful write. */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded, NUL-terminated copy of the user buffer */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
214
215static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
216 struct device_attribute *attr,
217 char *buf)
218{
219 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600220 if (device == NULL)
221 return 0;
222 return snprintf(buf, PAGE_SIZE, "%d\n",
223 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224}
225
/* sysfs attributes exposed under the KGSL device (mode 0644):
 * gpuclk/max_gpuclk select and cap the GPU frequency, pwrnap toggles
 * the nap power state, idle_timer sets the idle timeout. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);

/* NULL-terminated list consumed by kgsl_create_device_sysfs_files() */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	NULL
};
240
/* Create the power-control sysfs files on the device node.
 * Returns 0 on success or the underlying sysfs error. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
245
/* Remove the power-control sysfs files created by kgsl_pwrctrl_init_sysfs. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
250
/*
 * kgsl_pwrctrl_clk - gate the GPU core clocks on or off.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * The CLK_ON bit in power_flags makes the operation idempotent: the
 * test_and_{set,clear}_bit guard ensures the clk_enable/clk_disable
 * calls stay balanced even if this is invoked twice in a row.
 * Ordering is deliberate: rate is set before enabling (and the drop to
 * the slowest rate happens after disabling) so the core never runs at
 * a stale frequency; grp_clks are enabled last so GPU interrupts only
 * arrive once everything else is up (see inline comment below).
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* Park the source clock at the slowest level for
			 * full sleep; NAP keeps the rate for fast wake.
			 * gpu_freq == 0 means rate is slaved to AXI. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
290
/*
 * kgsl_pwrctrl_axi - turn the GPU's AXI bus vote on or off.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Guarded by the AXI_ON bit in power_flags so enable/disable calls
 * stay balanced.  Two bus mechanisms are handled: a direct ebi1 clock
 * and/or an msm_bus scaling client (vote 0 == release bandwidth).
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_disable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_enable(pwr->ebi1_clk);
			/* Re-vote bandwidth for the active power level */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
321
322
/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power rail (regulator) on/off.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Guarded by the POWER_ON bit so regulator_enable/disable stay
 * balanced.  gpu_reg may be NULL when no regulator was found at init.
 *
 * NOTE(review): the return value of regulator_enable() is ignored; a
 * failed enable leaves POWER_ON set with the rail actually off —
 * consider checking it.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power off, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power on, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
346
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Guarded by the IRQ_ON bit so enable_irq/disable_irq stay balanced.
 * On enable, the kernel IRQ line is unmasked before the device-level
 * irqctrl; on disable the order is reversed.  disable_irq() waits for
 * a running handler to finish, so the _nosync variant is used when we
 * are already in interrupt context to avoid self-deadlock.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
373
/*
 * kgsl_pwrctrl_init - acquire all power-related resources for a device.
 *
 * Reads the platform data attached to the parent platform_device and,
 * in order: gets the group clocks, picks the frequency-setting source
 * clock, configures AXI async mode, builds the rounded power-level
 * table, parks the core clock at the slowest level, and acquires the
 * (optional) regulator, ebi1 clock, bus-scale client and IRQ number.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error paths (clk_err/done) clocks already
 * acquired are not released here — presumably the caller invokes
 * kgsl_pwrctrl_close() on failure; confirm at the call site.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	/* Slot 0 is the frequency-setting source clock; 1..4 come from
	 * platform data and may be NULL. */
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/*acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			     pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	/* Round each requested frequency to one the clock framework can
	 * actually deliver; 0 means the rate is slaved to AXI. */
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				       pdata_pwr->pwrlevel[i].
				       gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
			pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* Regulator is optional: NULL means no rail control */
	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	/* ebi1 clock is also optional; prime it to the active level */
	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			     pwr->pwrlevels[pwr->active_pwrlevel].
			     bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl =
			msm_bus_scale_register_client(pdata_dev->clk.
						      bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				     "msm_bus_scale_register_client failed: "
				     "id %d table %p", device->id,
				     pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/*acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
			     pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		     clk_names[i], result);

done:
	return result;
}
479
/*
 * kgsl_pwrctrl_close - release everything acquired by kgsl_pwrctrl_init.
 *
 * Unregisters early suspend, frees the IRQ (if it was requested),
 * drops the ebi1 clock, bus-scale client, regulator and group clocks,
 * and clears power_flags.  grp_clks[0] is only NULLed, not clk_put():
 * it either aliases grp_clks[1] (already put in the loop) or was the
 * dedicated source clock — NOTE(review): in the latter case its
 * reference appears to be leaked; confirm against clk_get() in init.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
518
/*
 * kgsl_idle_check - workqueue handler run when the idle timer fires.
 *
 * Gives the pwrscale policy a chance to adjust the level (unless a
 * sleep was already requested), then tries to put an ACTIVE/NAP device
 * to sleep; if the device is still busy, the idle timer is re-armed.
 * A pending request on a HUNG/recovering device is simply cancelled.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
						  idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->requested_state != KGSL_STATE_SLEEP)
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/* Device not idle yet: try again one timeout from now */
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
	} else if (device->state & (KGSL_STATE_HUNG |
				    KGSL_STATE_DUMP_AND_RECOVER)) {
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}
540
541void kgsl_timer(unsigned long data)
542{
543 struct kgsl_device *device = (struct kgsl_device *) data;
544
545 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
546 if (device->requested_state != KGSL_STATE_SUSPEND) {
547 device->requested_state = KGSL_STATE_SLEEP;
548 /* Have work run in a non-interrupt context. */
549 queue_work(device->work_queue, &device->idle_check_ws);
550 }
551}
552
/*
 * kgsl_pre_hwaccess - ensure the GPU is awake before register access.
 * Caller must hold the device mutex (enforced by BUG_ON); wakes the
 * device if it is currently in SLEEP or NAP.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
560
/*
 * kgsl_check_suspended - block while the device is suspended or in
 * dump-and-recover.  The device mutex (held on entry) is dropped
 * around each wait so the suspend/recovery path can make progress,
 * then re-taken before returning.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
	    device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	}
	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	}
}
576
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - try to transition the device to NAP or SLEEP.
 *
 * Honors device->requested_state: NAP only turns the IRQ and clocks
 * off (fast wake); SLEEP additionally drops the AXI vote, parks the
 * core clock at the slowest level and notifies pwrscale.  If the GPU
 * is not idle the request is cancelled and -EBUSY is returned.
 * On success the wakelock and DMA latency vote are released.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		/* A napping device is already idle by definition */
		if (device->state == KGSL_STATE_NAP ||
		    device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	/* gpu_freq == 0 means the rate is slaved to AXI: nothing to set */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].
			     gpu_freq);
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
			      PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
		      device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
625
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake - bring the device back to the ACTIVE state.
 *
 * No-op while SUSPENDed.  Waking from full SLEEP restores the AXI
 * vote and notifies pwrscale; waking from NAP skips both (they were
 * never dropped).  Clocks come on before the state flips to ACTIVE,
 * and the IRQ is enabled only after, so no interrupt can arrive for a
 * device still marked asleep.  Re-arms the idle timer and takes the
 * wakelock/DMA latency vote.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		  jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
655
/* Power the device fully up: rail first, then core clocks, then the
 * AXI bus vote.  The sequence matters on this platform. */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
664
/* Power the device fully down, in the exact reverse order of
 * kgsl_pwrctrl_enable(): AXI vote, core clocks, then the rail. */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);