/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <linux/pm_runtime.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <linux/ktime.h>
#include <linux/delay.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sharedmem.h"

#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3

#define UPDATE_BUSY_VAL 1000000
#define UPDATE_BUSY 50
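
/*
 * Busy statistics are folded into the reported snapshot roughly every
 * UPDATE_BUSY_VAL microseconds of elapsed time, or after UPDATE_BUSY
 * idle-check passes in which the GPU was too busy to nap.
 */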

/*
 * Expected delay for post-interrupt processing on A3xx.
 * The delay may be longer; gradually increase it to compensate.
 * If the GPU isn't done by the maximum delay, it is working on
 * something other than just the final command sequence, so stop
 * waiting for it to go idle.
 */
#define INIT_UDELAY 200
#define MAX_UDELAY 2000

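/*
 * Map the platform clock names onto the KGSL_CLK_* bits that may be set
 * in the platform data clk_map; kgsl_pwrctrl_init() uses this table to
 * look up the clocks it needs.
 */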
struct clk_pair {
	const char *name;
	uint map;
};

struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
	{
		.name = "alt_mem_iface_clk",
		.map = KGSL_CLK_ALT_MEM_IFACE,
	},
};

static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state);
static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);

/*
 * Update the elapsed time at the current clock level if the device is
 * active (on_time = true); otherwise account the time as sleep time.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	clkstats->start = ktime_get();
}

/*
 * Given a requested power level, do bounds checking on the constraints
 * and return the nearest possible level. Note that a numerically lower
 * level means a higher GPU frequency, so the "max" constraint is the
 * smaller index.
 */

static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
{
	int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
	int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);

	if (level < max_pwrlevel)
		return max_pwrlevel;
	if (level > min_pwrlevel)
		return min_pwrlevel;

	return level;
}

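/**
 * kgsl_pwrctrl_pwrlevel_change() - Move the device to a new power level
 * @device: The device
 * @new_level: The requested level, clamped against the current constraints
 *
 * Update the bus vote for the new level and, when the clocks are on (or
 * the device is in NAP), step the GPU clock rate one level at a time
 * toward the target to avoid glitches.
 */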
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	int delta;
	int level;

	/* Adjust the power level to the current constraints */
	new_level = _adjust_pwrlevel(pwr, new_level);

	if (new_level == pwr->active_pwrlevel)
		return;

	delta = new_level < pwr->active_pwrlevel ? -1 : 1;

	update_clk_statistics(device, true);

	level = pwr->active_pwrlevel;

	/*
	 * Set the active power level first in case the clocks are off - if we
	 * don't do this then the pwrlevel change won't take effect when the
	 * clocks come back.
	 */

	pwr->active_pwrlevel = new_level;
	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {

		if (pwr->pcl)
			msm_bus_scale_client_update_request(pwr->pcl,
				pwrlevel->bus_freq);
		else if (pwr->ebi1_clk)
			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
	}

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
		(device->state == KGSL_STATE_NAP)) {

		/*
		 * On some platforms, changing the clock frequency while the
		 * core is busy causes instability. Idle the GPU core before
		 * changing the clock frequency.
		 */

		if (pwr->idle_needed == true)
			device->ftbl->idle(device);

		/*
		 * Don't shift by more than one level at a time to
		 * avoid glitches.
		 */

		while (level != new_level) {
			level += delta;

			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[level].gpu_freq);
		}
	}

	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);

static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%d", &level);
	if (ret != 1)
		return count;

	if (level < 0)
		return count;

	mutex_lock(&device->mutex);

	if (level > pwr->num_pwrlevels - 2)
		level = pwr->num_pwrlevels - 2;

	pwr->thermal_pwrlevel = level;

	/*
	 * If there is no power policy, set the clock to the requested thermal
	 * level - if thermal now happens to be higher than max, then that will
	 * be limited by the pwrlevel change function. Otherwise, if there is
	 * a policy, only change the active clock if it is higher than the new
	 * thermal level.
	 */

	if (device->pwrscale.policy == NULL ||
		pwr->thermal_pwrlevel > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
}

static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, level, max_level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%d", &level);
	if (ret != 1)
		return count;

	/* If the user specifies a negative number, don't change anything */
	if (level < 0)
		return count;

	mutex_lock(&device->mutex);

	/* You can't set a maximum power level lower than the minimum */
	if (level > pwr->min_pwrlevel)
		level = pwr->min_pwrlevel;

	pwr->max_pwrlevel = level;

	max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);

	/*
	 * If there is no policy then move to max by default. Otherwise only
	 * move max if the current level happens to be higher than the new max.
	 */

	if (device->pwrscale.policy == NULL ||
		(max_level > pwr->active_pwrlevel))
		kgsl_pwrctrl_pwrlevel_change(device, max_level);

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
}

static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, level, min_level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%d", &level);
	if (ret != 1)
		return count;

	/* Don't do anything on obviously incorrect values */
	if (level < 0)
		return count;

	mutex_lock(&device->mutex);
	if (level > pwr->num_pwrlevels - 2)
		level = pwr->num_pwrlevels - 2;

	/* You can't set a minimum power level lower than the maximum */
	if (level < pwr->max_pwrlevel)
		level = pwr->max_pwrlevel;

	pwr->min_pwrlevel = level;

	min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);

	/*
	 * Only move the power level higher if the minimum is higher than
	 * the current level.
	 */

	if (min_level < pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, min_level);

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
}

static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
}

/*
 * Given a GPU clock value, return the closest matching power level,
 * preferring the lowest-power (highest-index) level within 5 MHz.
 */

static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
{
	int i;

	for (i = pwr->num_pwrlevels - 1; i >= 0; i--) {
		if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
			return i;
	}

	return -ERANGE;
}

static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned long val;
	int ret, level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%ld", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level < 0)
		goto done;

	pwr->thermal_pwrlevel = level;

	/*
	 * If the thermal limit is lower than the current setting,
	 * move the speed down immediately.
	 */

	if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);

done:
	mutex_unlock(&device->mutex);
	return count;
}

static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned long val;
	int ret, level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%ld", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level >= 0)
		kgsl_pwrctrl_pwrlevel_change(device, level);

	mutex_unlock(&device->mutex);
	return count;
}

static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}

static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int mul = 1000/HZ;
	if (device == NULL)
		return 0;
	/* Show the idle_timeout converted to msec */
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.interval_timeout * mul);
}

static int kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int rc;

	if (device == NULL)
		return 0;

	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = kstrtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);
	device->pwrctrl.pm_qos_latency = val;
	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.pm_qos_latency);
}

static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_clk_stats *clkstats;

	if (device == NULL)
		return 0;
	clkstats = &device->pwrctrl.clk_stats;
	ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
		clkstats->on_time_old, clkstats->elapsed_old);
	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		clkstats->on_time_old = 0;
		clkstats->elapsed_old = 0;
	}
	return ret;
}

static int kgsl_pwrctrl_gputop_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_clk_stats *clkstats;
	int i = 0;
	char *ptr = buf;

	if (device == NULL)
		return 0;
	clkstats = &device->pwrctrl.clk_stats;
	ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
		clkstats->elapsed_old);
	for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
		i++, ptr += ret)
		ret = snprintf(ptr, PAGE_SIZE, "%7d ",
			clkstats->old_clock_time[i]);

	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		clkstats->on_time_old = 0;
		clkstats->elapsed_old = 0;
		for (i = 0; i < KGSL_MAX_PWRLEVELS; i++)
			clkstats->old_clock_time[i] = 0;
	}
	return (unsigned int) (ptr - buf);
}

static int kgsl_pwrctrl_gpu_available_frequencies_show(
				struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int index, num_chars = 0;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	for (index = 0; index < pwr->num_pwrlevels - 1; index++)
		num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
			pwr->pwrlevels[index].gpu_freq);
	buf[num_chars++] = '\n';
	return num_chars;
}

static int kgsl_pwrctrl_reset_count_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
}

static void __force_on(struct kgsl_device *device, int flag, int on)
{
	if (on) {
		switch (flag) {
		case KGSL_PWRFLAGS_CLK_ON:
			kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
				KGSL_STATE_ACTIVE);
			break;
		case KGSL_PWRFLAGS_AXI_ON:
			kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
			break;
		case KGSL_PWRFLAGS_POWER_ON:
			kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
			break;
		}
		set_bit(flag, &device->pwrctrl.ctrl_flags);
	} else {
		clear_bit(flag, &device->pwrctrl.ctrl_flags);
	}
}

static int __force_on_show(struct device *dev,
				struct device_attribute *attr,
				char *buf, int flag)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int i;

	/* Check for a NULL device before reading the flag */
	if (device == NULL)
		return 0;
	i = test_bit(flag, &device->pwrctrl.ctrl_flags);
	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static int __force_on_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count,
				int flag)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	int rc;

	if (device == NULL)
		return 0;

	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = kstrtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);
	__force_on(device, flag, val);
	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_force_clk_on_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_CLK_ON);
}

static int kgsl_pwrctrl_force_clk_on_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_CLK_ON);
}

static int kgsl_pwrctrl_force_bus_on_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_AXI_ON);
}

static int kgsl_pwrctrl_force_bus_on_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_AXI_ON);
}

static int kgsl_pwrctrl_force_rail_on_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_POWER_ON);
}

static int kgsl_pwrctrl_force_rail_on_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
}

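/*
 * These attributes appear under the KGSL device's sysfs directory
 * (typically /sys/class/kgsl/kgsl-3d0/ on most targets, though the
 * exact path depends on the device name). For example, capping the
 * GPU clock from userspace looks like:
 *
 *	echo 200000000 > /sys/class/kgsl/kgsl-3d0/max_gpuclk
 */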
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
DEVICE_ATTR(max_pwrlevel, 0644,
	kgsl_pwrctrl_max_pwrlevel_show,
	kgsl_pwrctrl_max_pwrlevel_store);
DEVICE_ATTR(min_pwrlevel, 0644,
	kgsl_pwrctrl_min_pwrlevel_show,
	kgsl_pwrctrl_min_pwrlevel_store);
DEVICE_ATTR(thermal_pwrlevel, 0644,
	kgsl_pwrctrl_thermal_pwrlevel_show,
	kgsl_pwrctrl_thermal_pwrlevel_store);
DEVICE_ATTR(num_pwrlevels, 0444,
	kgsl_pwrctrl_num_pwrlevels_show,
	NULL);
DEVICE_ATTR(reset_count, 0444,
	kgsl_pwrctrl_reset_count_show,
	NULL);
DEVICE_ATTR(pmqos_latency, 0644,
	kgsl_pwrctrl_pmqos_latency_show,
	kgsl_pwrctrl_pmqos_latency_store);
DEVICE_ATTR(force_clk_on, 0644,
	kgsl_pwrctrl_force_clk_on_show,
	kgsl_pwrctrl_force_clk_on_store);
DEVICE_ATTR(force_bus_on, 0644,
	kgsl_pwrctrl_force_bus_on_show,
	kgsl_pwrctrl_force_bus_on_store);
DEVICE_ATTR(force_rail_on, 0644,
	kgsl_pwrctrl_force_rail_on_show,
	kgsl_pwrctrl_force_rail_on_store);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	&dev_attr_max_pwrlevel,
	&dev_attr_min_pwrlevel,
	&dev_attr_thermal_pwrlevel,
	&dev_attr_num_pwrlevels,
	&dev_attr_reset_count,
	&dev_attr_pmqos_latency,
	&dev_attr_force_clk_on,
	&dev_attr_force_bus_on,
	&dev_attr_force_rail_on,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

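/*
 * Fold the accumulated per-level clock times and the total elapsed time
 * into the *_old snapshot fields reported by the gpubusy and gputop
 * sysfs files, then reset the running counters.
 */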
static void update_statistics(struct kgsl_device *device)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	unsigned int on_time = 0;
	int i;
	int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
	/* Per-clock-level time */
	for (i = 0; i < num_pwrlevels; i++) {
		clkstats->old_clock_time[i] = clkstats->clock_time[i];
		on_time += clkstats->clock_time[i];
		clkstats->clock_time[i] = 0;
	}
	clkstats->old_clock_time[num_pwrlevels] =
		clkstats->clock_time[num_pwrlevels];
	clkstats->clock_time[num_pwrlevels] = 0;
	clkstats->on_time_old = on_time;
	clkstats->elapsed_old = clkstats->elapsed;
	clkstats->elapsed = 0;

	trace_kgsl_gpubusy(device, clkstats->on_time_old,
		clkstats->elapsed_old);
}

/*
 * Track the amount of time the GPU is on vs. the total system time.
 * Regularly update the percentage of busy time displayed by sysfs.
 */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	update_clk_statistics(device, on_time);
	/* Update the output regularly and reset the counters. */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}

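/*
 * Turn the device clocks off or on. On the off path the clocks are only
 * disabled (low latency); unless the target state is NAP they are also
 * unprepared (high latency) and the GPU clock is dropped to the lowest
 * power level. The on path reverses this, setting the rate and preparing
 * the clocks first unless waking from NAP.
 */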
static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
		return;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/* High latency clock maintenance. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if (device->state != KGSL_STATE_NAP) {
				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
			}
			/*
			 * Enable the group clocks as the last step so that
			 * GPU interrupts can start arriving.
			 */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}

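/*
 * Vote the AXI bus on or off for the GPU, either through the MSM bus
 * scaling client (pcl) when one is registered or by setting the ebi1
 * clock rate directly.
 */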
static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
		return;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}

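/*
 * Switch the GPU power rails: the core regulator (gpu_reg) and, where
 * present, the CX domain regulator (gpu_cx). The rails are enabled
 * core-first and disabled in the reverse order.
 */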
static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
		return;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
						"core regulator_enable failed: %d\n",
						status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
						"cx regulator_enable failed: %d\n",
						status);
			}
		}
	}
}

void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

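/**
 * kgsl_pwrctrl_init() - Set up power control state for a device
 * @device: The device
 *
 * Acquire the clocks, regulators and bus-scaling handles named in the
 * platform data, build the power level table, and enable runtime PM.
 * Returns 0 on success or a negative error code on failure.
 */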
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/* Acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* Put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
	    pdata->num_levels < 1) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;

	/* Initialize the user and thermal clock constraints */

	pwr->max_pwrlevel = 0;
	pwr->min_pwrlevel = pdata->num_levels - 2;
	pwr->thermal_pwrlevel = 0;

	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	pwr->init_pwrlevel = pdata->init_level;
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				pdata->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	if (pwr->gpu_reg) {
		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
		if (IS_ERR(pwr->gpu_cx))
			pwr->gpu_cx = NULL;
	} else
		pwr->gpu_cx = NULL;

	pwr->power_flags = 0;

	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
			bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				"msm_bus_scale_register_client failed: "
				"id %d table %p", device->id,
				pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* Set the power level step multiplier with 1 as the default */
	pwr->step_mul = pdata->step_mul ? pdata->step_mul : 1;

	/* Set the CPU latency to 501 usec to allow low latency PC modes */
	pwr->pm_qos_latency = 501;

	pm_runtime_enable(device->parentdev);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		clks[i].name, result);

done:
	return result;
}

void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}

/**
 * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
 * @work: The work struct embedded in the KGSL device
 *
 * This function is called for work that is queued by the interrupt
 * handler or the idle timer. It attempts to transition to a clocks
 * off state if the active_cnt is 0 and the hardware is idle.
 */
void kgsl_idle_check(struct work_struct *work)
{
	int delay = INIT_UDELAY;
	int requested_state;
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);

	kgsl_pwrscale_idle(device);

	if (device->state == KGSL_STATE_ACTIVE
		|| device->state == KGSL_STATE_NAP) {
		/*
		 * If no user is explicitly trying to use the GPU
		 * (active_cnt is zero), then loop with increasing delay,
		 * waiting for the GPU to become idle.
		 */
		while (!atomic_read(&device->active_cnt) &&
			(delay < MAX_UDELAY)) {
			requested_state = device->requested_state;
			if (!kgsl_pwrctrl_sleep(device))
				break;
			/*
			 * If no new commands have been issued since the
			 * last interrupt, stay in this loop waiting for
			 * the GPU to become idle.
			 */
			if (!device->pwrctrl.irq_last)
				break;
			kgsl_pwrctrl_request_state(device, requested_state);
			mutex_unlock(&device->mutex);
			udelay(delay);
			delay *= 2;
			mutex_lock(&device->mutex);
		}

		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		if (device->state == KGSL_STATE_ACTIVE) {
			mod_timer(&device->idle_timer,
				jiffies +
				device->pwrctrl.interval_timeout);
			/*
			 * If the GPU has been too busy to sleep, make sure
			 * that is accurately reflected in the % busy numbers.
			 */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
				UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		} else {
			device->pwrctrl.irq_last = 0;
		}
	}

	mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_idle_check);

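/*
 * kgsl_timer() - Idle timer callback. Request SLEEP (or SLUMBER when
 * start/stop sleep-wake is enabled) and defer the actual transition to
 * the idle check worker, which runs outside interrupt context.
 */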
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

bool kgsl_pwrctrl_isenabled(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	return (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) != 0);
}

/**
 * kgsl_pre_hwaccess - Enforce preconditions for touching registers
 * @device: The device
 *
 * This function ensures that the correct lock is held and that the GPU
 * clock is on immediately before a register is read or written. Note
 * that this function does not check active_cnt because the registers
 * must be accessed during device start and stop, when the active_cnt
 * may legitimately be 0.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	/* In order to touch a register you must hold the device mutex... */
	BUG_ON(!mutex_is_locked(&device->mutex));
	/* ...and have the clock on! */
	BUG_ON(!kgsl_pwrctrl_isenabled(device));
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);

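/*
 * _nap() - Lightest sleep state: turn off the IRQ and disable (but do
 * not unprepare) the clocks, leaving the bus vote and power rails alone
 * so the GPU can resume quickly.
 */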
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}

static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.clk_stats.start = ktime_set(0, 0);
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}

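/*
 * _sleep() - Deeper than NAP: additionally drop the AXI bus vote,
 * record the sleep time, and relax the pm_qos CPU latency request.
 */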
static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		break;
	}

	kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);

	return 0;
}

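/*
 * _slumber() - Deepest soft state: suspend the contexts and stop the
 * device entirely, so a later wake must go through a full start().
 */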
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		/* Make sure power is on to stop the device */
		kgsl_pwrctrl_enable(device);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
			device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status = 0;
	unsigned int context_id;
	unsigned int state = device->state;
	unsigned int ts_processed = 0xdeaddead;
	struct kgsl_context *context;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		kgsl_sharedmem_readl(&device->memstore,
			(unsigned int *) &context_id,
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				current_context));
		context = kgsl_context_get(device, context_id);
		if (context)
			ts_processed = kgsl_readtimestamp(device, context,
				KGSL_TIMESTAMP_RETIRED);
		KGSL_PWR_INFO(device, "Wake from %s state. CTXT: %d RTRD TS: %08X\n",
			kgsl_pwrstate_to_str(state),
			context ? context->id : -1, ts_processed);
		kgsl_context_put(context);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		mod_timer(&device->idle_timer, jiffies +
				device->pwrctrl.interval_timeout);
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
				device->pwrctrl.pm_qos_latency);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);

void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);

void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);

const char *kgsl_pwrstate_to_str(unsigned int state)
{
	switch (state) {
	case KGSL_STATE_NONE:
		return "NONE";
	case KGSL_STATE_INIT:
		return "INIT";
	case KGSL_STATE_ACTIVE:
		return "ACTIVE";
	case KGSL_STATE_NAP:
		return "NAP";
	case KGSL_STATE_SLEEP:
		return "SLEEP";
	case KGSL_STATE_SUSPEND:
		return "SUSPEND";
	case KGSL_STATE_SLUMBER:
		return "SLUMBER";
	default:
		break;
	}
	return "UNKNOWN";
}
EXPORT_SYMBOL(kgsl_pwrstate_to_str);

/**
 * kgsl_active_count_get() - Increase the device active count
 * @device: Pointer to a KGSL device
 *
 * Increase the active count for the KGSL device and turn on
 * clocks if this is the first reference. Code paths that need
 * to touch the hardware or wait for the hardware to complete
 * an operation must hold an active count reference until they
 * are finished. An error code will be returned if waking the
 * device fails. The device mutex must be held while calling
 * this function.
 */
int kgsl_active_count_get(struct kgsl_device *device)
{
	int ret = 0;
	BUG_ON(!mutex_is_locked(&device->mutex));

	if (atomic_read(&device->active_cnt) == 0) {
		if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
			mutex_unlock(&device->mutex);
			wait_for_completion(&device->hwaccess_gate);
			mutex_lock(&device->mutex);
		}

		ret = kgsl_pwrctrl_wake(device);
	}
	if (ret == 0)
		atomic_inc(&device->active_cnt);
	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));
	return ret;
}
EXPORT_SYMBOL(kgsl_active_count_get);

/**
 * kgsl_active_count_get_light() - Increase the device active count
 * @device: Pointer to a KGSL device
 *
 * Increase the active count for the KGSL device WITHOUT
 * turning on the clocks, based on the assumption that the clocks are
 * already on from a previous active_count_get(). Currently this is
 * only used for creating kgsl_events.
 */
int kgsl_active_count_get_light(struct kgsl_device *device)
{
	if (atomic_inc_not_zero(&device->active_cnt) == 0) {
		dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
		return -EINVAL;
	}

	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(kgsl_active_count_get_light);

/**
 * kgsl_active_count_put() - Decrease the device active count
 * @device: Pointer to a KGSL device
 *
 * Decrease the active count for the KGSL device and turn off
 * clocks if there are no remaining references. This function will
 * transition the device to NAP if there are no other pending state
 * changes. It also completes the suspend gate. The device mutex must
 * be held while calling this function.
 */
void kgsl_active_count_put(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	BUG_ON(atomic_read(&device->active_cnt) == 0);

	kgsl_pwrscale_idle(device);

	if (atomic_dec_and_test(&device->active_cnt)) {
		if (device->state == KGSL_STATE_ACTIVE &&
			device->requested_state == KGSL_STATE_NONE) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
			if (kgsl_pwrctrl_sleep(device)) {
				kgsl_pwrctrl_request_state(device,
					KGSL_STATE_NAP);
				queue_work(device->work_queue,
					&device->idle_check_ws);
			}
		}

		mod_timer(&device->idle_timer,
			jiffies + device->pwrctrl.interval_timeout);
	}

	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));

	wake_up(&device->active_cnt_wq);
}
EXPORT_SYMBOL(kgsl_active_count_put);

static int _check_active_count(struct kgsl_device *device, int count)
{
	/* Return 0 if the active count is greater than the desired value */
	return atomic_read(&device->active_cnt) > count ? 0 : 1;
}

/**
 * kgsl_active_count_wait() - Wait for activity to finish.
 * @device: Pointer to a KGSL device
 * @count: Active count value to wait for
 *
 * Block until the active_cnt value hits the desired value
 */
int kgsl_active_count_wait(struct kgsl_device *device, int count)
{
	int result = 0;

	BUG_ON(!mutex_is_locked(&device->mutex));

	if (atomic_read(&device->active_cnt) > count) {
		int ret;
		mutex_unlock(&device->mutex);
		ret = wait_event_timeout(device->active_cnt_wq,
			_check_active_count(device, count), HZ);
		mutex_lock(&device->mutex);
		result = ret == 0 ? -ETIMEDOUT : 0;
	}

	return result;
}
EXPORT_SYMBOL(kgsl_active_count_wait);