blob: 26cc96c087ee6be57ddf58a426e990fc21060429 [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Ranjhith Kalisamycb1721c2013-05-28 16:59:59 -060026#include "kgsl_sharedmem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060028#define KGSL_PWRFLAGS_POWER_ON 0
29#define KGSL_PWRFLAGS_CLK_ON 1
30#define KGSL_PWRFLAGS_AXI_ON 2
31#define KGSL_PWRFLAGS_IRQ_ON 3
32
Lucille Sylvester10297892012-02-27 13:54:47 -070033#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070034#define UPDATE_BUSY_VAL 1000000
35#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036
/*
 * Maps a clock's device-tree/board name to the bitmask bit used in
 * the platform data clk_map field (see kgsl_pwrctrl_init below).
 */
struct clk_pair {
	const char *name;	/* name passed to clk_get() */
	uint map;		/* KGSL_CLK_* bit tested against pdata->clk_map */
};
41
/*
 * Table of all clocks KGSL may need; kgsl_pwrctrl_init() acquires only
 * those whose map bit is set in the platform data. Index order matters:
 * grp_clks[0] is the rate-setting source clock (falls back to [1]).
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
64
/*
 * Update the elapsed time at a particular clock level
 * if the device is active (on_time = true). Otherwise
 * store it as sleep time.
 *
 * Accumulates the microseconds since the previous call into
 * clk_stats.elapsed and into the per-pwrlevel bucket: the active level
 * when on, or the last (num_pwrlevels - 1) bucket as "sleep" time when
 * off. The start timestamp is re-armed on every call.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	/* First call ever: initialize the start timestamp lazily. */
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	/* Restart the measurement window for the next interval. */
	clkstats->start = ktime_get();
}
88
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -070089/*
90 * Given a requested power level do bounds checking on the constraints and
91 * return the nearest possible level
92 */
93
94static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
95{
Jordan Crouse00a01ba2012-12-05 15:58:16 -070096 int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
97 int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -070098
99 if (level < max_pwrlevel)
100 return max_pwrlevel;
101 if (level > min_pwrlevel)
102 return min_pwrlevel;
103
104 return level;
105}
106
/*
 * Change the GPU's active power level, honoring the user/thermal
 * constraints, stepping the core clock one level at a time, and
 * updating the bus (AXI) vote to match the new level.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	int delta;
	int level;

	/* Adjust the power level to the current constraints */
	new_level = _adjust_pwrlevel(pwr, new_level);

	if (new_level == pwr->active_pwrlevel)
		return;

	/* Direction of travel: -1 steps toward faster levels (lower index) */
	delta = new_level < pwr->active_pwrlevel ? -1 : 1;

	/* Close out the stats window at the old level before switching */
	update_clk_statistics(device, true);

	level = pwr->active_pwrlevel;

	/*
	 * Set the active powerlevel first in case the clocks are off - if we
	 * don't do this then the pwrlevel change won't take effect when the
	 * clocks come back
	 */

	pwr->active_pwrlevel = new_level;

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
		(device->state == KGSL_STATE_NAP)) {

		/*
		 * On some platforms, instability is caused on
		 * changing clock freq when the core is busy.
		 * Idle the gpu core before changing the clock freq.
		 */

		if (pwr->idle_needed == true)
			device->ftbl->idle(device);

		/*
		 * Don't shift by more than one level at a time to
		 * avoid glitches.
		 */

		while (level != new_level) {
			level += delta;

			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[level].gpu_freq);
		}
	}

	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {

		/* Prefer the bus-scale client; fall back to the ebi1 clock */
		if (pwr->pcl)
			msm_bus_scale_client_update_request(pwr->pcl,
				pwrlevel->bus_freq);
		else if (pwr->ebi1_clk)
			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
	}

	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}

EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
175
/*
 * sysfs store for "thermal_pwrlevel": set the thermal power level cap.
 * Malformed or negative input is silently ignored (returns count so
 * the write still "succeeds"); values past the end are clamped.
 */
static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%d", &level);
	if (ret != 1)
		return count;

	if (level < 0)
		return count;

	mutex_lock(&device->mutex);

	/* Clamp to the slowest non-sleep level */
	if (level > pwr->num_pwrlevels - 2)
		level = pwr->num_pwrlevels - 2;

	pwr->thermal_pwrlevel = level;

	/*
	 * If there is no power policy set the clock to the requested thermal
	 * level - if thermal now happens to be higher than max, then that will
	 * be limited by the pwrlevel change function. Otherwise if there is
	 * a policy only change the active clock if it is higher then the new
	 * thermal level
	 */

	if (device->pwrscale.policy == NULL ||
		pwr->thermal_pwrlevel > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);

	mutex_unlock(&device->mutex);

	return count;
}
219
220static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
221 struct device_attribute *attr,
222 char *buf)
223{
224
225 struct kgsl_device *device = kgsl_device_from_dev(dev);
226 struct kgsl_pwrctrl *pwr;
227 if (device == NULL)
228 return 0;
229 pwr = &device->pwrctrl;
230 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
231}
232
/*
 * sysfs store for "max_pwrlevel": set the fastest level the GPU may
 * use. Malformed/negative input is ignored; the value is clamped so
 * the maximum can never be slower than the minimum level.
 */
static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, level, max_level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%d", &level);
	if (ret != 1)
		return count;

	/* If the user specifies a negative number, then don't change anything */
	if (level < 0)
		return count;

	mutex_lock(&device->mutex);

	/* You can't set a maximum power level lower than the minimum */
	if (level > pwr->min_pwrlevel)
		level = pwr->min_pwrlevel;

	pwr->max_pwrlevel = level;


	/* Thermal limit still takes precedence over the user's max */
	max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);

	/*
	 * If there is no policy then move to max by default. Otherwise only
	 * move max if the current level happens to be higher then the new max
	 */

	if (device->pwrscale.policy == NULL ||
		(max_level > pwr->active_pwrlevel))
		kgsl_pwrctrl_pwrlevel_change(device, max_level);

	mutex_unlock(&device->mutex);

	return count;
}
278
279static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
280 struct device_attribute *attr,
281 char *buf)
282{
283
284 struct kgsl_device *device = kgsl_device_from_dev(dev);
285 struct kgsl_pwrctrl *pwr;
286 if (device == NULL)
287 return 0;
288 pwr = &device->pwrctrl;
289 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
290}
291
/*
 * sysfs store for "min_pwrlevel": set the slowest level the GPU may
 * use. Malformed/negative input is ignored; the value is clamped into
 * [max_pwrlevel, num_pwrlevels - 2].
 */
static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int ret, level, min_level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%d", &level);
	if (ret != 1)
		return count;

	/* Don't do anything on obviously incorrect values */
	if (level < 0)
		return count;

	mutex_lock(&device->mutex);
	if (level > pwr->num_pwrlevels - 2)
		level = pwr->num_pwrlevels - 2;

	/* You can't set a minimum power level lower than the maximum */
	if (level < pwr->max_pwrlevel)
		level = pwr->max_pwrlevel;

	pwr->min_pwrlevel = level;

	/* Thermal limit still takes precedence over the user's min */
	min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);

	/* Only move the power level higher if minimum is higher then the
	 * current level
	 */

	if (min_level < pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, min_level);

	mutex_unlock(&device->mutex);

	return count;
}
335
336static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
337 struct device_attribute *attr,
338 char *buf)
339{
340 struct kgsl_device *device = kgsl_device_from_dev(dev);
341 struct kgsl_pwrctrl *pwr;
342 if (device == NULL)
343 return 0;
344 pwr = &device->pwrctrl;
345 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
346}
347
348static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
349 struct device_attribute *attr,
350 char *buf)
351{
352
353 struct kgsl_device *device = kgsl_device_from_dev(dev);
354 struct kgsl_pwrctrl *pwr;
355 if (device == NULL)
356 return 0;
357 pwr = &device->pwrctrl;
358 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
359}
360
361/* Given a GPU clock value, return the nearest powerlevel */
362
363static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
364{
365 int i;
366
367 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
368 if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
369 return i;
370 }
371
372 return -ERANGE;
373}
374
/*
 * sysfs store for "max_gpuclk": set the thermal power level cap by
 * frequency rather than by index. Input that doesn't match a known
 * level (within 5 MHz) is silently ignored.
 */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned long val;
	int ret, level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%ld", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level < 0)
		goto done;

	pwr->thermal_pwrlevel = level;

	/*
	 * if the thermal limit is lower than the current setting,
	 * move the speed down immediately
	 */

	if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);

done:
	mutex_unlock(&device->mutex);
	return count;
}
412
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
414 struct device_attribute *attr,
415 char *buf)
416{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700417
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600419 struct kgsl_pwrctrl *pwr;
420 if (device == NULL)
421 return 0;
422 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700423 return snprintf(buf, PAGE_SIZE, "%d\n",
424 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
425}
426
/*
 * sysfs store for "gpuclk": request an immediate switch to the power
 * level nearest the written frequency. Unmatched frequencies are
 * silently ignored.
 */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned long val;
	int ret, level;

	if (device == NULL)
		return 0;

	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%ld", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level >= 0)
		kgsl_pwrctrl_pwrlevel_change(device, level);

	mutex_unlock(&device->mutex);
	return count;
}
453
454static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
455 struct device_attribute *attr,
456 char *buf)
457{
458 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600459 struct kgsl_pwrctrl *pwr;
460 if (device == NULL)
461 return 0;
462 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700463 return snprintf(buf, PAGE_SIZE, "%d\n",
464 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
465}
466
/*
 * sysfs store for "pwrnap": enable (1) or disable (0) the NAP power
 * state. Any other value is ignored. The input is copied into a
 * bounded temp buffer before parsing.
 */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
			(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
498
499static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
500 struct device_attribute *attr,
501 char *buf)
502{
503 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600504 if (device == NULL)
505 return 0;
506 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700507}
508
509
/*
 * sysfs store for "idle_timer": set the idle timeout, written in
 * milliseconds and stored in jiffies. The first successful write
 * latches the platform's original timeout into a function-static
 * floor; later writes below that floor are ignored.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	/* Sentinel 1 == "not captured yet"; holds the original timeout */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
			(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
546
547static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
548 struct device_attribute *attr,
549 char *buf)
550{
551 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600552 if (device == NULL)
553 return 0;
554 return snprintf(buf, PAGE_SIZE, "%d\n",
555 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700556}
557
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700558static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
559 struct device_attribute *attr,
560 char *buf)
561{
562 int ret;
563 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800564 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
565 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
566 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700567 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800568 clkstats->on_time_old = 0;
569 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700570 }
571 return ret;
572}
573
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800574static int kgsl_pwrctrl_gputop_show(struct device *dev,
575 struct device_attribute *attr,
576 char *buf)
577{
578 int ret;
579 struct kgsl_device *device = kgsl_device_from_dev(dev);
580 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
581 int i = 0;
582 char *ptr = buf;
583
584 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
585 clkstats->elapsed_old);
586 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
587 i++, ptr += ret)
588 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
589 clkstats->old_clock_time[i]);
590
591 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
592 clkstats->on_time_old = 0;
593 clkstats->elapsed_old = 0;
594 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
595 clkstats->old_clock_time[i] = 0;
596 }
597 return (unsigned int) (ptr - buf);
598}
599
Anshuman Dani91ede1e2012-08-21 14:44:38 +0530600static int kgsl_pwrctrl_gpu_available_frequencies_show(
601 struct device *dev,
602 struct device_attribute *attr,
603 char *buf)
604{
605 struct kgsl_device *device = kgsl_device_from_dev(dev);
606 struct kgsl_pwrctrl *pwr;
607 int index, num_chars = 0;
608
609 if (device == NULL)
610 return 0;
611 pwr = &device->pwrctrl;
612 for (index = 0; index < pwr->num_pwrlevels - 1; index++)
613 num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
614 pwr->pwrlevels[index].gpu_freq);
615 buf[num_chars++] = '\n';
616 return num_chars;
617}
618
Jordan Crouse013cf422013-05-28 17:03:32 -0600619static int kgsl_pwrctrl_reset_count_show(struct device *dev,
620 struct device_attribute *attr,
621 char *buf)
622{
623 struct kgsl_device *device = kgsl_device_from_dev(dev);
624 return snprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
625}
626
/*
 * sysfs attribute declarations. Read-only attributes (mode 0444) pass
 * NULL for the store handler; "pwrnap" is group-writable (0664).
 */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
DEVICE_ATTR(max_pwrlevel, 0644,
	kgsl_pwrctrl_max_pwrlevel_show,
	kgsl_pwrctrl_max_pwrlevel_store);
DEVICE_ATTR(min_pwrlevel, 0644,
	kgsl_pwrctrl_min_pwrlevel_show,
	kgsl_pwrctrl_min_pwrlevel_store);
DEVICE_ATTR(thermal_pwrlevel, 0644,
	kgsl_pwrctrl_thermal_pwrlevel_show,
	kgsl_pwrctrl_thermal_pwrlevel_store);
DEVICE_ATTR(num_pwrlevels, 0444,
	kgsl_pwrctrl_num_pwrlevels_show,
	NULL);
DEVICE_ATTR(reset_count, 0444,
	kgsl_pwrctrl_reset_count_show,
	NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700655
/* NULL-terminated list of attributes registered by init_sysfs below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	&dev_attr_max_pwrlevel,
	&dev_attr_min_pwrlevel,
	&dev_attr_thermal_pwrlevel,
	&dev_attr_num_pwrlevels,
	&dev_attr_reset_count,
	NULL
};
671
/* Register all power-control sysfs attributes for this device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
676
/* Remove the sysfs attributes registered by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
681
/*
 * Roll the accumulating clock statistics into the "old" snapshot that
 * the gpubusy/gputop sysfs files report, then zero the accumulators.
 * The final bucket (index num_pwrlevels) holds sleep time and is
 * rolled over separately from the per-level loop.
 */
static void update_statistics(struct kgsl_device *device)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	unsigned int on_time = 0;
	int i;
	int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
	/*PER CLK TIME*/
	for (i = 0; i < num_pwrlevels; i++) {
		clkstats->old_clock_time[i] = clkstats->clock_time[i];
		on_time += clkstats->clock_time[i];
		clkstats->clock_time[i] = 0;
	}
	/* Sleep-time bucket: snapshot but do not count toward on_time */
	clkstats->old_clock_time[num_pwrlevels] =
		clkstats->clock_time[num_pwrlevels];
	clkstats->clock_time[num_pwrlevels] = 0;
	clkstats->on_time_old = on_time;
	clkstats->elapsed_old = clkstats->elapsed;
	clkstats->elapsed = 0;
}
701
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	update_clk_statistics(device, on_time);
	/* Update the output regularly and reset the counters. */
	/* Roll the snapshot once a full window (UPDATE_BUSY_VAL us) has
	 * elapsed, or immediately if the bus is off. */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}
714
/*
 * Gate the GPU group clocks on or off. The CLK_ON flag guards against
 * double enable/disable. On the off path, clocks are only unprepared
 * (high-latency teardown) when not headed for NAP; the else-if branch
 * handles a NAP->SLEEP transition where the clocks were already
 * disabled but still prepared. On the on path, prepare and rate-set
 * happen before clk_enable() so the GPU interrupt can arrive last.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Drop the source clock to the lowest rate
				 * before unpreparing */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/* High latency clock maintenance. */
			/* Clocks already disabled (e.g. from NAP): finish the
			 * teardown by unpreparing them for full sleep */
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			/* Coming from NAP the clocks are still prepared and
			 * at rate; otherwise re-prepare and restore the rate */
			if (device->state != KGSL_STATE_NAP) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);

				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
			}
			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700773
/*
 * Turn the AXI (bus) interface vote on or off, guarded by the AXI_ON
 * flag. Uses the msm_bus scaling client when present, and the ebi1
 * clock as the direct rate-control path.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate to zero before gating */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700807
/*
 * Switch the GPU power rails, guarded by the POWER_ON flag. Note the
 * ordering: the cx regulator is disabled before the core regulator on
 * power-off, and enabled after it on power-on. Enable failures are
 * logged but not propagated.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
							"core regulator_enable "
							"failed: %d\n",
							status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
							"cx regulator_enable "
							"failed: %d\n",
							status);
			}
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844
/*
 * Enable or disable the GPU interrupt line, guarded by the IRQ_ON
 * flag. Uses disable_irq_nosync() when called from interrupt context,
 * since disable_irq() would wait for running handlers to finish.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
867
868int kgsl_pwrctrl_init(struct kgsl_device *device)
869{
870 int i, result = 0;
871 struct clk *clk;
872 struct platform_device *pdev =
873 container_of(device->parentdev, struct platform_device, dev);
874 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600875 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876
877 /*acquire clocks */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600878 for (i = 0; i < KGSL_MAX_CLKS; i++) {
879 if (pdata->clk_map & clks[i].map) {
880 clk = clk_get(&pdev->dev, clks[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881 if (IS_ERR(clk))
882 goto clk_err;
883 pwr->grp_clks[i] = clk;
884 }
885 }
886 /* Make sure we have a source clk for freq setting */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600887 if (pwr->grp_clks[0] == NULL)
888 pwr->grp_clks[0] = pwr->grp_clks[1];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700889
890 /* put the AXI bus into asynchronous mode with the graphics cores */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600891 if (pdata->set_grp_async != NULL)
892 pdata->set_grp_async();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700893
Lucille Sylvesterd260c882013-05-28 16:59:46 -0600894 if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
895 pdata->num_levels < 1) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700896 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600897 pdata->num_levels);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898 result = -EINVAL;
899 goto done;
900 }
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600901 pwr->num_pwrlevels = pdata->num_levels;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700902
903 /* Initialize the user and thermal clock constraints */
904
905 pwr->max_pwrlevel = 0;
906 pwr->min_pwrlevel = pdata->num_levels - 2;
907 pwr->thermal_pwrlevel = 0;
908
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600909 pwr->active_pwrlevel = pdata->init_level;
Lucille Sylvester67b4c532012-02-08 11:24:31 -0800910 pwr->default_pwrlevel = pdata->init_level;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600911 for (i = 0; i < pdata->num_levels; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700912 pwr->pwrlevels[i].gpu_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600913 (pdata->pwrlevel[i].gpu_freq > 0) ?
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700914 clk_round_rate(pwr->grp_clks[0],
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600915 pdata->pwrlevel[i].
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700916 gpu_freq) : 0;
917 pwr->pwrlevels[i].bus_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600918 pdata->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600919 pwr->pwrlevels[i].io_fraction =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600920 pdata->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700921 }
922 /* Do not set_rate for targets in sync with AXI */
923 if (pwr->pwrlevels[0].gpu_freq > 0)
924 clk_set_rate(pwr->grp_clks[0], pwr->
925 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
926
Matt Wagantalld6fbf232012-05-03 20:09:28 -0700927 pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700928 if (IS_ERR(pwr->gpu_reg))
929 pwr->gpu_reg = NULL;
930
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700931 if (pwr->gpu_reg) {
Pu Chen12053782012-07-24 17:04:27 -0700932 pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
933 if (IS_ERR(pwr->gpu_cx))
934 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700935 } else
Pu Chen12053782012-07-24 17:04:27 -0700936 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700937
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700938 pwr->power_flags = 0;
939
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600940 pwr->nap_allowed = pdata->nap_allowed;
Kedar Joshic11d0982012-02-07 10:59:49 +0530941 pwr->idle_needed = pdata->idle_needed;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600942 pwr->interval_timeout = pdata->idle_timeout;
Lynus Vazfe4bede2012-04-06 11:53:30 -0700943 pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700944 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700945 if (IS_ERR(pwr->ebi1_clk))
946 pwr->ebi1_clk = NULL;
947 else
948 clk_set_rate(pwr->ebi1_clk,
949 pwr->pwrlevels[pwr->active_pwrlevel].
950 bus_freq);
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600951 if (pdata->bus_scale_table != NULL) {
952 pwr->pcl = msm_bus_scale_register_client(pdata->
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700953 bus_scale_table);
954 if (!pwr->pcl) {
955 KGSL_PWR_ERR(device,
956 "msm_bus_scale_register_client failed: "
957 "id %d table %p", device->id,
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600958 pdata->bus_scale_table);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700959 result = -EINVAL;
960 goto done;
961 }
962 }
963
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700964
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700965 pm_runtime_enable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700966 register_early_suspend(&device->display_off);
967 return result;
968
969clk_err:
970 result = PTR_ERR(clk);
971 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600972 clks[i].name, result);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973
974done:
975 return result;
976}
977
978void kgsl_pwrctrl_close(struct kgsl_device *device)
979{
980 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
981 int i;
982
983 KGSL_PWR_INFO(device, "close device %d\n", device->id);
984
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700985 pm_runtime_disable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986 unregister_early_suspend(&device->display_off);
987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988 clk_put(pwr->ebi1_clk);
989
990 if (pwr->pcl)
991 msm_bus_scale_unregister_client(pwr->pcl);
992
993 pwr->pcl = 0;
994
995 if (pwr->gpu_reg) {
996 regulator_put(pwr->gpu_reg);
997 pwr->gpu_reg = NULL;
998 }
999
Pu Chen12053782012-07-24 17:04:27 -07001000 if (pwr->gpu_cx) {
1001 regulator_put(pwr->gpu_cx);
1002 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -07001003 }
1004
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001005 for (i = 1; i < KGSL_MAX_CLKS; i++)
1006 if (pwr->grp_clks[i]) {
1007 clk_put(pwr->grp_clks[i]);
1008 pwr->grp_clks[i] = NULL;
1009 }
1010
1011 pwr->grp_clks[0] = NULL;
1012 pwr->power_flags = 0;
1013}
1014
/**
 * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
 * @device: The device
 *
 * This function is called for work that is queued by the interrupt
 * handler or the idle timer. It attempts to transition to a clocks
 * off state if the active_cnt is 0 and the hardware is idle.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);

	kgsl_pwrscale_idle(device);

	if (device->state == KGSL_STATE_ACTIVE
		   || device->state == KGSL_STATE_NAP) {
		/* Only sleep when there are no active users and the HW idles */
		if (device->active_cnt > 0 || kgsl_pwrctrl_sleep(device) != 0) {

			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

			/* Could not sleep: re-arm the timer and retry later */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/*
			 * If the GPU has been too busy to sleep, make sure
			 * that is accurately reflected in the % busy numbers.
			 */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
							 UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_FT)) {
		/* Recovery states: just drop any pending state request */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_idle_check);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001063
1064void kgsl_timer(unsigned long data)
1065{
1066 struct kgsl_device *device = (struct kgsl_device *) data;
1067
1068 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +05301069 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -07001070 if (device->pwrctrl.restore_slumber ||
1071 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -07001072 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
1073 else
1074 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001075 /* Have work run in a non-interrupt context. */
1076 queue_work(device->work_queue, &device->idle_check_ws);
1077 }
1078}
1079
Jeremy Gebben522dcff2013-05-28 17:07:32 -06001080
/**
 * kgsl_pre_hwaccess - Enforce preconditions for touching registers
 * @device: The device
 *
 * This function ensures that the correct lock is held and that the GPU
 * clock is on immediately before a register is read or written. Note
 * that this function does not check active_cnt because the registers
 * must be accessed during device start and stop, when the active_cnt
 * may legitimately be 0.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	/* In order to touch a register you must hold the device mutex...*/
	BUG_ON(!mutex_is_locked(&device->mutex));
	/* and have the clock on! */
	BUG_ON(!test_bit(KGSL_PWRFLAGS_CLK_ON, &device->pwrctrl.power_flags));
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
1099
/*
 * _nap() - Transition from ACTIVE to NAP (irq and core clocks off,
 * power rail left on). Returns -EBUSY if the hardware is not idle;
 * already being in a low power state is treated as success.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		del_timer_sync(&device->hang_timer);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through - now napping, nothing more to do */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
1123
/*
 * _sleep_accounting() - Reset busy-time bookkeeping and notify the
 * power scaling policy that the device is going to sleep.
 */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.clk_stats.start = ktime_set(0, 0);
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
1132
/*
 * _sleep() - Transition to SLEEP: irq, AXI and core clocks off and
 * busy accounting reset. Returns -EBUSY if the hardware is not idle;
 * already sleeping or slumbering is treated as success.
 */
static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		/* no DMA wakeup-latency requirement while asleep */
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}

	kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);

	return 0;
}
1165
/*
 * _slumber() - Transition to SLUMBER: cancel the idle and hang timers
 * and stop the hardware completely (contexts suspended, core stopped).
 * Returns -EBUSY if the hardware is not idle.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		del_timer_sync(&device->hang_timer);
		/* make sure power is on to stop the device*/
		kgsl_pwrctrl_enable(device);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		/* no DMA wakeup-latency requirement while slumbering */
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep() - Dispatch the requested low power transition
 * (NAP, SLEEP or SLUMBER). Returns 0 on success, -EBUSY if the
 * hardware was not idle, or -EINVAL for an unrecognized request.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		/*
		 * NOTE(review): _sleep() already makes this identical
		 * kgsl_mmu_disable_clk_on_ts() call on every path; this
		 * second call looks redundant - confirm before removing.
		 */
		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -07001228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001229/******************************************************************/
1230/* Caller must hold the device mutex. */
Jeremy Gebben522dcff2013-05-28 17:07:32 -06001231int kgsl_pwrctrl_wake(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001232{
Jeremy Gebben522dcff2013-05-28 17:07:32 -06001233 int status = 0;
Ranjhith Kalisamycb1721c2013-05-28 16:59:59 -06001234 unsigned int context_id;
1235 unsigned int state = device->state;
1236 unsigned int ts_processed = 0xdeaddead;
1237 struct kgsl_context *context;
1238
Jeremy Gebben388c2972011-12-16 09:05:07 -07001239 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
1240 switch (device->state) {
1241 case KGSL_STATE_SLUMBER:
Carter Cooper1013dda2013-05-28 17:07:13 -06001242 status = device->ftbl->start(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001243 if (status) {
1244 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
1245 KGSL_DRV_ERR(device, "start failed %d\n", status);
1246 break;
1247 }
1248 /* fall through */
1249 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001250 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
1251 kgsl_pwrscale_wake(device);
Ranjhith Kalisamycb1721c2013-05-28 16:59:59 -06001252 kgsl_sharedmem_readl(&device->memstore,
1253 (unsigned int *) &context_id,
1254 KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
1255 current_context));
Jordan Crouse67db48d2013-05-28 17:04:17 -06001256 context = kgsl_context_get(device, context_id);
Ranjhith Kalisamycb1721c2013-05-28 16:59:59 -06001257 if (context)
1258 ts_processed = kgsl_readtimestamp(device, context,
1259 KGSL_TIMESTAMP_RETIRED);
1260 KGSL_PWR_INFO(device, "Wake from %s state. CTXT: %d RTRD TS: %08X\n",
1261 kgsl_pwrstate_to_str(state),
1262 context ? context->id : -1, ts_processed);
Jordan Crouse67db48d2013-05-28 17:04:17 -06001263 kgsl_context_put(context);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001264 /* fall through */
1265 case KGSL_STATE_NAP:
1266 /* Turn on the core clocks */
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -06001267 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001268 /* Enable state before turning on irq */
1269 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
1270 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
1271 /* Re-enable HW access */
1272 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001273 jiffies + device->pwrctrl.interval_timeout);
Hareesh Gundub17232c2013-07-22 17:34:59 +05301274 mod_timer(&device->hang_timer,
1275 (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
Devin Kim66ad4c02012-09-21 20:28:50 -07001276 pm_qos_update_request(&device->pm_qos_req_dma,
1277 GPU_SWFI_LATENCY);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001278 case KGSL_STATE_ACTIVE:
Vinay Roy65c41b32012-11-25 00:48:38 +05301279 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001280 break;
1281 default:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001282 KGSL_PWR_WARN(device, "unhandled state %s\n",
1283 kgsl_pwrstate_to_str(device->state));
Jeremy Gebben388c2972011-12-16 09:05:07 -07001284 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Jeremy Gebben522dcff2013-05-28 17:07:32 -06001285 status = -EINVAL;
Jeremy Gebben388c2972011-12-16 09:05:07 -07001286 break;
1287 }
Jeremy Gebben522dcff2013-05-28 17:07:32 -06001288 return status;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001289}
1290EXPORT_SYMBOL(kgsl_pwrctrl_wake);
1291
/* Power up: rail first, then core clocks, then AXI. */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
1300
/* Power down in the reverse order of kgsl_pwrctrl_enable(). */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001309
/**
 * kgsl_pwrctrl_set_state() - Commit a power state transition.
 * @device: The device
 * @state: The new KGSL_STATE_* value
 *
 * Records the new state and clears any pending state request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
1317
/**
 * kgsl_pwrctrl_request_state() - Record a desired power state.
 * @device: The device
 * @state: The requested KGSL_STATE_* value (KGSL_STATE_NONE clears it)
 *
 * Only traces when the request actually changes to a new state.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001325
1326const char *kgsl_pwrstate_to_str(unsigned int state)
1327{
1328 switch (state) {
1329 case KGSL_STATE_NONE:
1330 return "NONE";
1331 case KGSL_STATE_INIT:
1332 return "INIT";
1333 case KGSL_STATE_ACTIVE:
1334 return "ACTIVE";
1335 case KGSL_STATE_NAP:
1336 return "NAP";
1337 case KGSL_STATE_SLEEP:
1338 return "SLEEP";
1339 case KGSL_STATE_SUSPEND:
1340 return "SUSPEND";
1341 case KGSL_STATE_HUNG:
1342 return "HUNG";
Tarun Karrad20d71a2013-01-25 15:38:57 -08001343 case KGSL_STATE_DUMP_AND_FT:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001344 return "DNR";
1345 case KGSL_STATE_SLUMBER:
1346 return "SLUMBER";
1347 default:
1348 break;
1349 }
1350 return "UNKNOWN";
1351}
1352EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1353
Jeremy Gebben522dcff2013-05-28 17:07:32 -06001354
/**
 * kgsl_active_count_get() - Increase the device active count
 * @device: Pointer to a KGSL device
 *
 * Increase the active count for the KGSL device and turn on
 * clocks if this is the first reference. Code paths that need
 * to touch the hardware or wait for the hardware to complete
 * an operation must hold an active count reference until they
 * are finished. An error code will be returned if waking the
 * device fails. The device mutex must be held while calling
 * this function.
 */
int kgsl_active_count_get(struct kgsl_device *device)
{
	int ret = 0;
	BUG_ON(!mutex_is_locked(&device->mutex));

	if (device->active_cnt == 0) {
		/*
		 * First reference: block behind the suspend or fault
		 * tolerance gates, dropping the mutex so the gate owner
		 * can make progress, then wake the hardware.
		 */
		if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
			mutex_unlock(&device->mutex);
			wait_for_completion(&device->hwaccess_gate);
			mutex_lock(&device->mutex);
		} else if (device->state == KGSL_STATE_DUMP_AND_FT) {
			mutex_unlock(&device->mutex);
			wait_for_completion(&device->ft_gate);
			mutex_lock(&device->mutex);
		}
		ret = kgsl_pwrctrl_wake(device);
	}
	/* Only take the reference if the wakeup succeeded */
	if (ret == 0)
		device->active_cnt++;
	return ret;
}
EXPORT_SYMBOL(kgsl_active_count_get);
1390
/**
 * kgsl_active_count_get_light() - Increase the device active count
 * @device: Pointer to a KGSL device
 *
 * Increase the active count for the KGSL device WITHOUT
 * turning on the clocks. Currently this is only used for creating
 * kgsl_events. The device mutex must be held while calling this function.
 */
int kgsl_active_count_get_light(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));

	/* The device must already be awake... */
	if (device->state != KGSL_STATE_ACTIVE) {
		dev_WARN_ONCE(device->dev, 1, "device in unexpected state %s\n",
				kgsl_pwrstate_to_str(device->state));
		return -EINVAL;
	}

	/* ...and held active by an existing reference */
	if (device->active_cnt == 0) {
		dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
		return -EINVAL;
	}

	device->active_cnt++;
	return 0;
}
EXPORT_SYMBOL(kgsl_active_count_get_light);
1418
/**
 * kgsl_active_count_put() - Decrease the device active count
 * @device: Pointer to a KGSL device
 *
 * Decrease the active count for the KGSL device and turn off
 * clocks if there are no remaining references. This function will
 * transition the device to NAP if there are no other pending state
 * changes. It also completes the suspend gate. The device mutex must
 * be held while calling this function.
 */
void kgsl_active_count_put(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	BUG_ON(device->active_cnt == 0);

	kgsl_pwrscale_idle(device);
	/* Fast path: not the last reference */
	if (device->active_cnt > 1) {
		device->active_cnt--;
		return;
	}

	/* Last reference: re-arm the suspend gate before dropping it */
	INIT_COMPLETION(device->suspend_gate);

	if (device->pwrctrl.nap_allowed == true &&
		(device->state == KGSL_STATE_ACTIVE &&
		device->requested_state == KGSL_STATE_NONE)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
		/* If napping failed, let the idle timer try again later */
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
					jiffies
					+ device->pwrctrl.interval_timeout);
	}
	device->active_cnt--;

	if (device->active_cnt == 0)
		complete(&device->suspend_gate);
}
EXPORT_SYMBOL(kgsl_active_count_put);
1457
1458/**
1459 * kgsl_active_count_wait() - Wait for activity to finish.
1460 * @device: Pointer to a KGSL device
1461 *
1462 * Block until all active_cnt users put() their reference.
1463 */
1464void kgsl_active_count_wait(struct kgsl_device *device)
1465{
1466 BUG_ON(!mutex_is_locked(&device->mutex));
1467
1468 if (device->active_cnt != 0) {
1469 mutex_unlock(&device->mutex);
1470 wait_for_completion(&device->suspend_gate);
1471 mutex_lock(&device->mutex);
1472 }
1473}
1474EXPORT_SYMBOL(kgsl_active_count_wait);