/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"

#define GPU_SWFI_LATENCY	3

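/*
 * Request a new power level. The request is honored only if it lies
 * between the thermal limit and the lowest non-idle level and actually
 * differs from the current level; the core clock rate and AXI bus vote
 * are then reprogrammed to match.
 */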
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				  unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (new_level < (pwr->num_pwrlevels - 1) &&
	    new_level >= pwr->thermal_pwrlevel &&
	    new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
			      pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);

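/*
 * Shared handler for the gpuclk and max_gpuclk sysfs stores. The written
 * value is a clock rate in Hz and is matched against the power level
 * table with a 5 MHz tolerance. With max set, the matched level becomes
 * the thermal cap; otherwise it is requested directly.
 */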
static ssize_t __gpuclk_store(int max, struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret, i;
	long delta = 5000000;
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%lu", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	for (i = 0; i < pwr->num_pwrlevels; i++) {
		/* Compare in signed arithmetic so the difference can't wrap. */
		if (abs((long)pwr->pwrlevels[i].gpu_freq - (long)val) < delta) {
			if (max)
				pwr->thermal_pwrlevel = i;
			break;
		}
	}

	if (i == pwr->num_pwrlevels)
		goto done;

	/*
	 * If the current or requested clock speed is greater than the
	 * thermal limit, bump down immediately.
	 */
	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
	else if (!max)
		kgsl_pwrctrl_pwrlevel_change(device, i);

done:
	mutex_unlock(&device->mutex);
	return count;
}

static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}

static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static ssize_t kgsl_pwrctrl_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}

static ssize_t kgsl_pwrctrl_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}

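/*
 * pwrnap sysfs file: write 1 or 0 to allow or forbid the nap state,
 * read to query the current setting.
 */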
static ssize_t kgsl_pwrctrl_pwrnap_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t kgsl_pwrctrl_pwrnap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);

	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}

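/*
 * idle_timer sysfs file: the idle timeout, written in milliseconds and
 * kept internally in jiffies. Values below the platform-supplied default
 * are ignored.
 */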
static ssize_t kgsl_pwrctrl_idle_timer_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	/* Remember the platform-supplied timeout the first time through. */
	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/*
	 * The timeout is requested in ms but kept in jiffies.
	 * msecs_to_jiffies() avoids the divide-by-zero that a
	 * 1000/HZ divisor would produce when HZ > 1000.
	 */
	val = msecs_to_jiffies(val);
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t kgsl_pwrctrl_idle_timer_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);

	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			device->pwrctrl.interval_timeout);
}

DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	    kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	    kgsl_pwrctrl_idle_timer_store);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

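/*
 * Gate the group clocks. On the off path the clocks are disabled and,
 * unless a nap is pending, the core clock is parked at the lowest rate;
 * on the on path the active rate is restored before the clocks are
 * re-enabled.
 */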
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
			    (device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
			    (device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);

			/* Enable grp_clk last so that the GPU interrupt
			 * can fire. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);

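/*
 * Gate the AXI bus interface: the EBI clock and, when a bus-scale client
 * is registered, the bandwidth vote for the active power level.
 */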
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_disable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_enable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);

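/*
 * Switch the GPU power rail by enabling or disabling its regulator,
 * when one was found at init time.
 */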
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power off, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power on, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);

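/*
 * Gate the GPU interrupt. disable_irq() can sleep, so when called from
 * interrupt context the nosync variant is used instead.
 */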
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

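/*
 * One-time power setup: claim the clocks named in the platform data,
 * round each power level's frequency against the source clock, and pick
 * up the regulator, EBI clock, bus-scale client, and IRQ. Returns 0 on
 * success or a negative errno.
 */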
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/* Acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* Put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			     pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				       pdata_pwr->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			     pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(
				pdata_dev->clk.bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				"msm_bus_scale_register_client failed: "
				"id %d table %p\n", device->id,
				pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* Acquire interrupt */
	pwr->interrupt_num = platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
			     pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		     clk_names[i], result);

done:
	return result;
}

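/*
 * Undo kgsl_pwrctrl_init(): release the IRQ, clocks, bus-scale client,
 * and regulator, and drop the early-suspend handler.
 */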
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}

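/*
 * Work handler queued by the idle timer. If the device is active or
 * napping, try to put it to sleep; if that fails, rearm the timer.
 */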
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
						  idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->requested_state != KGSL_STATE_SLEEP)
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
	} else if (device->state & (KGSL_STATE_HUNG |
				    KGSL_STATE_DUMP_AND_RECOVER)) {
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}

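/*
 * Idle timer callback. Runs in timer (interrupt) context, so it only
 * requests the sleep transition and defers the work to the workqueue.
 */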
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		device->requested_state = KGSL_STATE_SLEEP;
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

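/*
 * Called before touching hardware registers; wakes the device if it is
 * sleeping or napping. The device mutex must be held.
 */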
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);

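/*
 * Block until any pending suspend or dump-and-recover completes. The
 * device mutex is dropped while waiting and retaken afterwards.
 */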
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
	    device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	}
	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	}
}

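/*
 * Try to transition to NAP or SLEEP, as previously requested. NAP only
 * gates the IRQ and clocks; SLEEP additionally drops the AXI vote and
 * parks the core clock at its lowest rate. In both cases the idle
 * wakelock and pm_qos request are released. Returns -EBUSY if the
 * hardware is not idle enough for the transition.
 */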
/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
		    device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
			      PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
		      device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

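/*
 * Bring the device back to ACTIVE: restore the bus vote and clocks,
 * enable the IRQ, rearm the idle timer, and take the idle wakelock and
 * a tighter pm_qos latency vote.
 */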
/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		  jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);