/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"

#define GPU_SWFI_LATENCY	3

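/*
 * kgsl_pwrctrl_pwrlevel_change - switch the GPU to a new power level.
 * The request takes effect only if it lies between the thermal limit
 * and the lowest defined level and differs from the current level;
 * when the clocks/AXI are up, the core clock rate and the bus
 * bandwidth vote are reprogrammed to match.
 */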
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].
				gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
			pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);

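/*
 * __gpuclk_store - common handler for the gpuclk and max_gpuclk sysfs
 * writes. Matches the requested frequency (in Hz) against the defined
 * power levels; with max set, the matching level becomes the thermal
 * ceiling, otherwise the device switches to that level directly.
 */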
static ssize_t __gpuclk_store(int max, struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int ret, i, delta = 5000000;
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	ret = sscanf(buf, "%lu", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	/* Find the power level whose gpu_freq is within 5 MHz of val. */
	for (i = 0; i < pwr->num_pwrlevels; i++) {
		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
			if (max)
				pwr->thermal_pwrlevel = i;
			break;
		}
	}

	if (i == pwr->num_pwrlevels)
		goto done;

	/*
	 * If the current or requested clock speed is greater than the
	 * thermal limit, bump down immediately.
	 */

	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
	else if (!max)
		kgsl_pwrctrl_pwrlevel_change(device, i);

done:
	mutex_unlock(&device->mutex);
	return count;
}

static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}

static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static ssize_t kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}

static ssize_t kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}

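/*
 * pwrnap: sysfs toggle for whether an idle GPU may drop into the
 * shallow NAP state (core clocks gated while the AXI vote and power
 * rail stay up; see the nap path in kgsl_pwrctrl_sleep() below).
 */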
static ssize_t kgsl_pwrctrl_pwrnap_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int rc;

	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t kgsl_pwrctrl_pwrnap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return sprintf(buf, "%d\n", pwr->nap_allowed);
}

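/*
 * idle_timer: sysfs control for the idle timeout. Values are written
 * in milliseconds and stored as jiffies; requests shorter than the
 * platform's original timeout are ignored.
 */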
static ssize_t kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	/* This conversion assumes HZ <= 1000; otherwise div would be 0. */
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}

static ssize_t kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return sprintf(buf, "%d\n", pwr->interval_timeout);
}

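/*
 * The attributes below appear under the device's sysfs directory. A
 * sketch of expected usage from userspace; the exact path is platform
 * dependent and /sys/class/kgsl/kgsl-3d0 is only an illustration:
 *
 *   cat /sys/class/kgsl/kgsl-3d0/gpuclk          # current GPU freq, Hz
 *   echo 266667000 > /sys/class/kgsl/kgsl-3d0/gpuclk   # request a freq
 *   echo 200000000 > .../max_gpuclk              # set thermal ceiling
 *   echo 0 > .../pwrnap                          # disallow NAP
 *   echo 100 > .../idle_timer                    # idle timeout, in ms
 */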
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

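/*
 * kgsl_pwrctrl_clk - gate or ungate the GPU clocks. On the way down
 * the core clock is parked at the lowest defined rate (unless we are
 * only napping); on the way up the rate for the active power level is
 * restored before the clocks are enabled.
 */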
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/*
			 * Enable grp_clk last so that the GPU interrupt
			 * can start coming in.
			 */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);

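/*
 * kgsl_pwrctrl_axi - raise or drop the GPU's AXI bus vote, via the
 * ebi1 clock and/or the msm_bus scaling client, whichever the
 * platform provides.
 */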
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_disable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_enable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);

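/*
 * kgsl_pwrctrl_pwrrail - enable or disable the GPU power rail through
 * its regulator, if one was found at init time.
 */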
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power off, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power on, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);

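/*
 * kgsl_pwrctrl_irq - unmask or mask the GPU interrupt. When masking
 * from interrupt context, disable_irq_nosync() is used so that we do
 * not wait for a handler we may currently be running in.
 */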
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

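/*
 * kgsl_pwrctrl_init - one-time power setup from platform data: grab
 * the clocks, round each power level's frequency to a rate the clock
 * framework can provide, and acquire the regulator, bus-scaling
 * client and IRQ.
 */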
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/* Acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* Put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				pdata_pwr->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata_dev->clk.
							 bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				"msm_bus_scale_register_client failed: "
				"id %d table %p\n", device->id,
				pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* Acquire interrupt */
	pwr->interrupt_num = platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
			pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		clk_names[i], result);
	/* Release any clocks acquired before the failure. */
	while (--i > 0)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

done:
	return result;
}

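/*
 * kgsl_pwrctrl_close - undo kgsl_pwrctrl_init: release the IRQ,
 * clocks, bus-scaling client and regulator.
 */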
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}

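/*
 * kgsl_idle_check - workqueue handler driven by the idle timer. Gives
 * the pwrscale policy a chance to adjust levels, then tries to put an
 * idle device to sleep; if the device is still busy, the idle timer
 * is re-armed.
 */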
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->requested_state != KGSL_STATE_SLEEP)
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				jiffies +
				device->pwrctrl.interval_timeout);
	} else if (device->state & (KGSL_STATE_HUNG |
				KGSL_STATE_DUMP_AND_RECOVER)) {
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}

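/*
 * kgsl_timer - idle timer callback. Runs in softirq context, so it
 * only records the SLEEP request and defers the real transition to
 * the device workqueue.
 */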
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		device->requested_state = KGSL_STATE_SLEEP;
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);

void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
			device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	}
	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	}
}

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
				PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
		device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);