/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"

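/*
 * Latency tolerance requested from pm_qos while the GPU is awake
 * (assumed to be in usec, the unit of the DMA latency class), keeping
 * the CPU out of idle states too deep to service GPU wakeups quickly.
 */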
#define GPU_SWFI_LATENCY	3

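/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level.
 * The request is honored only when it is a valid level, no faster than
 * the thermal limit, and different from the current level; the core
 * clock rate and bus bandwidth vote are then updated while they are on.
 */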
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
			      pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);

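/*
 * Shared handler for the gpuclk and max_gpuclk sysfs stores.  The
 * requested rate is matched (within 5 MHz) against the power level
 * table; "max" writes move the thermal ceiling, plain writes switch
 * the active level directly.
 */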
static ssize_t __gpuclk_store(int max, struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret, i, delta = 5000000;
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	ret = sscanf(buf, "%lu", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	for (i = 0; i < pwr->num_pwrlevels; i++) {
		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
			if (max)
				pwr->thermal_pwrlevel = i;
			break;
		}
	}

	if (i == pwr->num_pwrlevels)
		goto done;

	/*
	 * If the current or requested clock speed is greater than the
	 * thermal limit, bump down immediately.
	 */
	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
	else if (!max)
		kgsl_pwrctrl_pwrlevel_change(device, i);

done:
	mutex_unlock(&device->mutex);
	return count;
}

static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}

static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static ssize_t kgsl_pwrctrl_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}

static ssize_t kgsl_pwrctrl_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}

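/*
 * pwrnap sysfs: writing 1 allows the GPU to drop into the shallow
 * "nap" state when idle, writing 0 keeps it out of nap.
 */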
static ssize_t kgsl_pwrctrl_pwrnap_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int rc;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t kgsl_pwrctrl_pwrnap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return sprintf(buf, "%d\n", pwr->nap_allowed);
}

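/*
 * idle_timer sysfs: the timeout is written in milliseconds, stored in
 * jiffies, and never allowed below the value the driver started with.
 */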
static ssize_t kgsl_pwrctrl_idle_timer_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t kgsl_pwrctrl_idle_timer_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return sprintf(buf, "%d\n", pwr->interval_timeout);
}

DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	    kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	    kgsl_pwrctrl_idle_timer_store);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

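/*
 * kgsl_pwrctrl_clk - gate the GPU core clocks on or off.  On the off
 * path the core rate is also dropped to the last table entry (assumed
 * to be the lowest rate) unless we are only napping; on the on path
 * the active rate is restored before the clocks are ungated.
 */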
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/*
			 * As the last step, enable grp_clk so the GPU
			 * interrupt can come in.
			 */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);

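/*
 * kgsl_pwrctrl_axi - enable or disable the GPU's AXI bus path: the
 * ebi1 clock and the bus-scale bandwidth vote (dropped to 0 when off).
 */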
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_disable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_enable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);

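/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power rail (regulator) on/off.
 */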
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power off, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power on, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);

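/*
 * kgsl_pwrctrl_irq - gate GPU interrupt delivery.  The device-level
 * irqctrl is the inner gate: armed after enable_irq() and disarmed
 * before disable_irq().
 */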
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

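/*
 * kgsl_pwrctrl_init - one-time power setup from platform data: grab
 * the clock handles, build the power level table (rounding each rate
 * through the source clock), and acquire the regulator, bus-scale
 * client and interrupt.
 */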
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/* acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			     pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				       pdata_pwr->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			     pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata_dev->clk.
							 bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				     "msm_bus_scale_register_client failed: "
				     "id %d table %p", device->id,
				     pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* acquire interrupt */
	pwr->interrupt_num = platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
			     pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		     clk_names[i], result);

done:
	return result;
}

void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}

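/*
 * kgsl_idle_check - deferred work that tries to put an idle GPU to
 * sleep; if the device refuses (still busy), the idle timer is simply
 * re-armed for another interval.
 */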
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
						  idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->requested_state != KGSL_STATE_SLEEP)
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
	} else if (device->state & (KGSL_STATE_HUNG |
				    KGSL_STATE_DUMP_AND_RECOVER)) {
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}

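/*
 * kgsl_timer - idle timer callback; runs in interrupt context, so it
 * only requests SLEEP and punts the real work to the workqueue.
 */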
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		device->requested_state = KGSL_STATE_SLEEP;
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);

void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
	    device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	}
	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	}
}

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
		    device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
			      PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
		      device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		  jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);