blob: b6b024ec6a68f451dfc98da4a9b85e3683499581 [file] [log] [blame]
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060027#define KGSL_PWRFLAGS_POWER_ON 0
28#define KGSL_PWRFLAGS_CLK_ON 1
29#define KGSL_PWRFLAGS_AXI_ON 2
30#define KGSL_PWRFLAGS_IRQ_ON 3
31
Lucille Sylvester10297892012-02-27 13:54:47 -070032#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070033#define UPDATE_BUSY_VAL 1000000
34#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
/* Maps a clock name (as looked up via clk_get()) to its bit in the
 * platform data clk_map, so only the clocks the board declares are
 * acquired in kgsl_pwrctrl_init().
 */
struct clk_pair {
	const char *name;
	uint map;
};

struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
63
/* Update the elapsed time at a particular clock level
 * if the device is active (on_time = true). Otherwise
 * store it as sleep time.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	/* First call since reset: open the measurement window now */
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		/* The last slot (num_pwrlevels - 1) accumulates sleep time */
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	/* Restart the window for the next sample */
	clkstats->start = ktime_get();
}
87
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -070088/*
89 * Given a requested power level do bounds checking on the constraints and
90 * return the nearest possible level
91 */
92
93static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
94{
Jordan Crouse00a01ba2012-12-05 15:58:16 -070095 int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
96 int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -070097
98 if (level < max_pwrlevel)
99 return max_pwrlevel;
100 if (level > min_pwrlevel)
101 return min_pwrlevel;
102
103 return level;
104}
105
/*
 * Move the GPU to a new power level.  The request is first clamped by
 * _adjust_pwrlevel(); if the result differs from the current level the
 * core clock is stepped one level at a time and the bus vote updated.
 * Caller is expected to hold device->mutex (all callers in this file do).
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	int delta;
	int level;

	/* Adjust the power level to the current constraints */
	new_level = _adjust_pwrlevel(pwr, new_level);

	if (new_level == pwr->active_pwrlevel)
		return;

	/* Direction of the one-step walk below: -1 = faster, +1 = slower */
	delta = new_level < pwr->active_pwrlevel ? -1 : 1;

	/* Charge the time spent so far to the outgoing level */
	update_clk_statistics(device, true);

	level = pwr->active_pwrlevel;

	/*
	 * Set the active powerlevel first in case the clocks are off - if we
	 * don't do this then the pwrlevel change won't take effect when the
	 * clocks come back
	 */

	pwr->active_pwrlevel = new_level;

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
		(device->state == KGSL_STATE_NAP)) {

		/*
		 * On some platforms, instability is caused on
		 * changing clock freq when the core is busy.
		 * Idle the gpu core before changing the clock freq.
		 */

		if (pwr->idle_needed == true)
			device->ftbl->idle(device);

		/*
		 * Don't shift by more than one level at a time to
		 * avoid glitches.
		 */

		while (level != new_level) {
			level += delta;

			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[level].gpu_freq);
		}
	}

	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	/* Re-vote the bus bandwidth to match the new GPU level */
	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {

		if (pwr->pcl)
			msm_bus_scale_client_update_request(pwr->pcl,
				pwrlevel->bus_freq);
		else if (pwr->ebi1_clk)
			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
	}

	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}

EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
174
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700175static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
176 struct device_attribute *attr,
177 const char *buf, size_t count)
178{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600180 struct kgsl_pwrctrl *pwr;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700181 int ret, level;
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600182
183 if (device == NULL)
184 return 0;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700185
186 pwr = &device->pwrctrl;
187
188 ret = sscanf(buf, "%d", &level);
189 if (ret != 1)
190 return count;
191
192 if (level < 0)
193 return count;
194
195 mutex_lock(&device->mutex);
196
197 if (level > pwr->num_pwrlevels - 2)
198 level = pwr->num_pwrlevels - 2;
199
200 pwr->thermal_pwrlevel = level;
201
202 /*
203 * If there is no power policy set the clock to the requested thermal
204 * level - if thermal now happens to be higher than max, then that will
205 * be limited by the pwrlevel change function. Otherwise if there is
206 * a policy only change the active clock if it is higher then the new
207 * thermal level
208 */
209
210 if (device->pwrscale.policy == NULL ||
211 pwr->thermal_pwrlevel > pwr->active_pwrlevel)
212 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
213
214 mutex_unlock(&device->mutex);
215
216 return count;
217}
218
219static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
220 struct device_attribute *attr,
221 char *buf)
222{
223
224 struct kgsl_device *device = kgsl_device_from_dev(dev);
225 struct kgsl_pwrctrl *pwr;
226 if (device == NULL)
227 return 0;
228 pwr = &device->pwrctrl;
229 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
230}
231
232static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
233 struct device_attribute *attr,
234 const char *buf, size_t count)
235{
236 struct kgsl_device *device = kgsl_device_from_dev(dev);
237 struct kgsl_pwrctrl *pwr;
238 int ret, level, max_level;
239
240 if (device == NULL)
241 return 0;
242
243 pwr = &device->pwrctrl;
244
245 ret = sscanf(buf, "%d", &level);
246 if (ret != 1)
247 return count;
248
249 /* If the use specifies a negative number, then don't change anything */
250 if (level < 0)
251 return count;
252
253 mutex_lock(&device->mutex);
254
255 /* You can't set a maximum power level lower than the minimum */
256 if (level > pwr->min_pwrlevel)
257 level = pwr->min_pwrlevel;
258
259 pwr->max_pwrlevel = level;
260
261
262 max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
263
264 /*
265 * If there is no policy then move to max by default. Otherwise only
266 * move max if the current level happens to be higher then the new max
267 */
268
269 if (device->pwrscale.policy == NULL ||
270 (max_level > pwr->active_pwrlevel))
271 kgsl_pwrctrl_pwrlevel_change(device, max_level);
272
273 mutex_unlock(&device->mutex);
274
275 return count;
276}
277
278static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
279 struct device_attribute *attr,
280 char *buf)
281{
282
283 struct kgsl_device *device = kgsl_device_from_dev(dev);
284 struct kgsl_pwrctrl *pwr;
285 if (device == NULL)
286 return 0;
287 pwr = &device->pwrctrl;
288 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
289}
290
291static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
292 struct device_attribute *attr,
293 const char *buf, size_t count)
294{ struct kgsl_device *device = kgsl_device_from_dev(dev);
295 struct kgsl_pwrctrl *pwr;
296 int ret, level, min_level;
297
298 if (device == NULL)
299 return 0;
300
301 pwr = &device->pwrctrl;
302
303 ret = sscanf(buf, "%d", &level);
304 if (ret != 1)
305 return count;
306
307 /* Don't do anything on obviously incorrect values */
308 if (level < 0)
309 return count;
310
311 mutex_lock(&device->mutex);
312 if (level > pwr->num_pwrlevels - 2)
313 level = pwr->num_pwrlevels - 2;
314
315 /* You can't set a minimum power level lower than the maximum */
316 if (level < pwr->max_pwrlevel)
317 level = pwr->max_pwrlevel;
318
319 pwr->min_pwrlevel = level;
320
321 min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
322
323 /* Only move the power level higher if minimum is higher then the
324 * current level
325 */
326
327 if (min_level < pwr->active_pwrlevel)
328 kgsl_pwrctrl_pwrlevel_change(device, min_level);
329
330 mutex_unlock(&device->mutex);
331
332 return count;
333}
334
335static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
336 struct device_attribute *attr,
337 char *buf)
338{
339 struct kgsl_device *device = kgsl_device_from_dev(dev);
340 struct kgsl_pwrctrl *pwr;
341 if (device == NULL)
342 return 0;
343 pwr = &device->pwrctrl;
344 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
345}
346
347static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
348 struct device_attribute *attr,
349 char *buf)
350{
351
352 struct kgsl_device *device = kgsl_device_from_dev(dev);
353 struct kgsl_pwrctrl *pwr;
354 if (device == NULL)
355 return 0;
356 pwr = &device->pwrctrl;
357 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
358}
359
360/* Given a GPU clock value, return the nearest powerlevel */
361
362static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
363{
364 int i;
365
366 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
367 if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
368 return i;
369 }
370
371 return -ERANGE;
372}
373
374static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
375 struct device_attribute *attr,
376 const char *buf, size_t count)
377{
378 struct kgsl_device *device = kgsl_device_from_dev(dev);
379 struct kgsl_pwrctrl *pwr;
380 unsigned long val;
381 int ret, level;
382
383 if (device == NULL)
384 return 0;
385
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600386 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387
388 ret = sscanf(buf, "%ld", &val);
389 if (ret != 1)
390 return count;
391
392 mutex_lock(&device->mutex);
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700393 level = _get_nearest_pwrlevel(pwr, val);
394 if (level < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395 goto done;
396
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700397 pwr->thermal_pwrlevel = level;
398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399 /*
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700400 * if the thermal limit is lower than the current setting,
401 * move the speed down immediately
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402 */
403
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700404 if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700405 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700406
407done:
408 mutex_unlock(&device->mutex);
409 return count;
410}
411
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700412static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
413 struct device_attribute *attr,
414 char *buf)
415{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700416
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600418 struct kgsl_pwrctrl *pwr;
419 if (device == NULL)
420 return 0;
421 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700422 return snprintf(buf, PAGE_SIZE, "%d\n",
423 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
424}
425
426static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
427 struct device_attribute *attr,
428 const char *buf, size_t count)
429{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700430 struct kgsl_device *device = kgsl_device_from_dev(dev);
431 struct kgsl_pwrctrl *pwr;
432 unsigned long val;
433 int ret, level;
434
435 if (device == NULL)
436 return 0;
437
438 pwr = &device->pwrctrl;
439
440 ret = sscanf(buf, "%ld", &val);
441 if (ret != 1)
442 return count;
443
444 mutex_lock(&device->mutex);
445 level = _get_nearest_pwrlevel(pwr, val);
446 if (level >= 0)
447 kgsl_pwrctrl_pwrlevel_change(device, level);
448
449 mutex_unlock(&device->mutex);
450 return count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451}
452
453static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
454 struct device_attribute *attr,
455 char *buf)
456{
457 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600458 struct kgsl_pwrctrl *pwr;
459 if (device == NULL)
460 return 0;
461 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700462 return snprintf(buf, PAGE_SIZE, "%d\n",
463 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
464}
465
466static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
467 struct device_attribute *attr,
468 const char *buf, size_t count)
469{
470 char temp[20];
471 unsigned long val;
472 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600473 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700474 int rc;
475
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600476 if (device == NULL)
477 return 0;
478 pwr = &device->pwrctrl;
479
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700480 snprintf(temp, sizeof(temp), "%.*s",
481 (int)min(count, sizeof(temp) - 1), buf);
482 rc = strict_strtoul(temp, 0, &val);
483 if (rc)
484 return rc;
485
486 mutex_lock(&device->mutex);
487
488 if (val == 1)
489 pwr->nap_allowed = true;
490 else if (val == 0)
491 pwr->nap_allowed = false;
492
493 mutex_unlock(&device->mutex);
494
495 return count;
496}
497
498static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
499 struct device_attribute *attr,
500 char *buf)
501{
502 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600503 if (device == NULL)
504 return 0;
505 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700506}
507
508
/*
 * sysfs write handler for idle_timer: set the idle timeout, given in ms
 * and stored in jiffies.  Values below the original platform timeout are
 * rejected silently.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	/*
	 * Latches the platform's original timeout on first write so later
	 * writes cannot go below it.  1 is the "not yet captured" sentinel,
	 * so an actual original timeout of 1 would be re-captured — quirky
	 * but preserved.  NOTE: static, hence shared across all devices.
	 */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy a bounded, NUL-terminated chunk of the input for parsing */
	snprintf(temp, sizeof(temp), "%.*s",
			(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
545
546static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
547 struct device_attribute *attr,
548 char *buf)
549{
550 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600551 if (device == NULL)
552 return 0;
553 return snprintf(buf, PAGE_SIZE, "%d\n",
554 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555}
556
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700557static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
558 struct device_attribute *attr,
559 char *buf)
560{
561 int ret;
562 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800563 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
564 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
565 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700566 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800567 clkstats->on_time_old = 0;
568 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700569 }
570 return ret;
571}
572
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800573static int kgsl_pwrctrl_gputop_show(struct device *dev,
574 struct device_attribute *attr,
575 char *buf)
576{
577 int ret;
578 struct kgsl_device *device = kgsl_device_from_dev(dev);
579 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
580 int i = 0;
581 char *ptr = buf;
582
583 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
584 clkstats->elapsed_old);
585 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
586 i++, ptr += ret)
587 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
588 clkstats->old_clock_time[i]);
589
590 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
591 clkstats->on_time_old = 0;
592 clkstats->elapsed_old = 0;
593 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
594 clkstats->old_clock_time[i] = 0;
595 }
596 return (unsigned int) (ptr - buf);
597}
598
Anshuman Dani91ede1e2012-08-21 14:44:38 +0530599static int kgsl_pwrctrl_gpu_available_frequencies_show(
600 struct device *dev,
601 struct device_attribute *attr,
602 char *buf)
603{
604 struct kgsl_device *device = kgsl_device_from_dev(dev);
605 struct kgsl_pwrctrl *pwr;
606 int index, num_chars = 0;
607
608 if (device == NULL)
609 return 0;
610 pwr = &device->pwrctrl;
611 for (index = 0; index < pwr->num_pwrlevels - 1; index++)
612 num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
613 pwr->pwrlevels[index].gpu_freq);
614 buf[num_chars++] = '\n';
615 return num_chars;
616}
617
/*
 * sysfs attribute declarations.  0644/0664 entries are writable tunables,
 * 0444 entries are read-only statistics/capability reports.
 */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
DEVICE_ATTR(max_pwrlevel, 0644,
	kgsl_pwrctrl_max_pwrlevel_show,
	kgsl_pwrctrl_max_pwrlevel_store);
DEVICE_ATTR(min_pwrlevel, 0644,
	kgsl_pwrctrl_min_pwrlevel_show,
	kgsl_pwrctrl_min_pwrlevel_store);
DEVICE_ATTR(thermal_pwrlevel, 0644,
	kgsl_pwrctrl_thermal_pwrlevel_show,
	kgsl_pwrctrl_thermal_pwrlevel_store);
DEVICE_ATTR(num_pwrlevels, 0444,
	kgsl_pwrctrl_num_pwrlevels_show,
	NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700643
/* NULL-terminated table registered by kgsl_pwrctrl_init_sysfs() below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	&dev_attr_max_pwrlevel,
	&dev_attr_min_pwrlevel,
	&dev_attr_thermal_pwrlevel,
	&dev_attr_num_pwrlevels,
	NULL
};
658
/* Create all pwrctrl sysfs files on the kgsl device node. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
663
/* Remove the pwrctrl sysfs files created by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
668
/*
 * Snapshot the live per-powerlevel counters into the *_old fields read
 * by the gpubusy/gputop sysfs files, then reset the live counters.
 * Slot num_pwrlevels - 1 holds accumulated sleep time, so on_time only
 * sums the slots below it.
 */
static void update_statistics(struct kgsl_device *device)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	unsigned int on_time = 0;
	int i;
	int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
	/*PER CLK TIME*/
	for (i = 0; i < num_pwrlevels; i++) {
		clkstats->old_clock_time[i] = clkstats->clock_time[i];
		on_time += clkstats->clock_time[i];
		clkstats->clock_time[i] = 0;
	}
	/* Sleep slot is snapshotted too but excluded from on_time */
	clkstats->old_clock_time[num_pwrlevels] =
		clkstats->clock_time[num_pwrlevels];
	clkstats->clock_time[num_pwrlevels] = 0;
	clkstats->on_time_old = on_time;
	clkstats->elapsed_old = clkstats->elapsed;
	clkstats->elapsed = 0;
}
688
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	update_clk_statistics(device, on_time);
	/*
	 * Update the output regularly and reset the counters.  A bus-off
	 * transition forces the snapshot so the stats window closes when
	 * the GPU goes fully idle.
	 */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}
701
/*
 * Gate or ungate the GPU clocks.  state selects on/off; requested_state
 * is the power state the device is heading to, which decides whether the
 * high-latency half (clk_unprepare + dropping to the lowest rate) is
 * also performed.  NAP keeps clocks prepared for a fast wakeup; SLEEP
 * fully unprepares them.  The CLK_ON bit makes transitions idempotent.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* Low-latency gate: disable, highest index first */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Park the core clock at the slowest rate */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/*
			 * Clocks already disabled (e.g. coming from NAP):
			 * still perform the high-latency maintenance when
			 * dropping all the way to SLEEP.
			 */
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if (device->state != KGSL_STATE_NAP) {
				/* Re-prepare and restore the active rate */
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);

				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
			}
			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700760
/*
 * Turn the AXI/memory bus vote on or off.  Works through either a bus
 * scale client (pcl) or a direct ebi1 clock; the AXI_ON bit in
 * power_flags makes repeated calls idempotent.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate vote before gating */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				/* Restore the vote for the active level */
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794
/*
 * Switch the GPU power rails.  Enable order is core (gpu_reg) then CX
 * (gpu_cx); disable order is the reverse.  Enable failures are logged
 * but not propagated.  The POWER_ON bit makes calls idempotent.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
						"core regulator_enable "
						"failed: %d\n",
						status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
						"cx regulator_enable "
						"failed: %d\n",
						status);
			}
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831
/*
 * Enable or disable the GPU interrupt line, tracked by the IRQ_ON bit.
 * From interrupt context the nosync variant is used, since disable_irq()
 * can sleep waiting for a running handler to finish.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
854
855int kgsl_pwrctrl_init(struct kgsl_device *device)
856{
857 int i, result = 0;
858 struct clk *clk;
859 struct platform_device *pdev =
860 container_of(device->parentdev, struct platform_device, dev);
861 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600862 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700863
864 /*acquire clocks */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600865 for (i = 0; i < KGSL_MAX_CLKS; i++) {
866 if (pdata->clk_map & clks[i].map) {
867 clk = clk_get(&pdev->dev, clks[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700868 if (IS_ERR(clk))
869 goto clk_err;
870 pwr->grp_clks[i] = clk;
871 }
872 }
873 /* Make sure we have a source clk for freq setting */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600874 if (pwr->grp_clks[0] == NULL)
875 pwr->grp_clks[0] = pwr->grp_clks[1];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876
877 /* put the AXI bus into asynchronous mode with the graphics cores */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600878 if (pdata->set_grp_async != NULL)
879 pdata->set_grp_async();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700880
Lucille Sylvesterd260c882013-05-28 16:59:46 -0600881 if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
882 pdata->num_levels < 1) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600884 pdata->num_levels);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700885 result = -EINVAL;
886 goto done;
887 }
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600888 pwr->num_pwrlevels = pdata->num_levels;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700889
890 /* Initialize the user and thermal clock constraints */
891
892 pwr->max_pwrlevel = 0;
893 pwr->min_pwrlevel = pdata->num_levels - 2;
894 pwr->thermal_pwrlevel = 0;
895
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600896 pwr->active_pwrlevel = pdata->init_level;
Lucille Sylvester67b4c532012-02-08 11:24:31 -0800897 pwr->default_pwrlevel = pdata->init_level;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600898 for (i = 0; i < pdata->num_levels; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700899 pwr->pwrlevels[i].gpu_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600900 (pdata->pwrlevel[i].gpu_freq > 0) ?
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 clk_round_rate(pwr->grp_clks[0],
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600902 pdata->pwrlevel[i].
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700903 gpu_freq) : 0;
904 pwr->pwrlevels[i].bus_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600905 pdata->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600906 pwr->pwrlevels[i].io_fraction =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600907 pdata->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700908 }
909 /* Do not set_rate for targets in sync with AXI */
910 if (pwr->pwrlevels[0].gpu_freq > 0)
911 clk_set_rate(pwr->grp_clks[0], pwr->
912 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
913
Matt Wagantalld6fbf232012-05-03 20:09:28 -0700914 pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700915 if (IS_ERR(pwr->gpu_reg))
916 pwr->gpu_reg = NULL;
917
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700918 if (pwr->gpu_reg) {
Pu Chen12053782012-07-24 17:04:27 -0700919 pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
920 if (IS_ERR(pwr->gpu_cx))
921 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700922 } else
Pu Chen12053782012-07-24 17:04:27 -0700923 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700924
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700925 pwr->power_flags = 0;
926
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600927 pwr->nap_allowed = pdata->nap_allowed;
Kedar Joshic11d0982012-02-07 10:59:49 +0530928 pwr->idle_needed = pdata->idle_needed;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600929 pwr->interval_timeout = pdata->idle_timeout;
Lynus Vazfe4bede2012-04-06 11:53:30 -0700930 pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700931 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700932 if (IS_ERR(pwr->ebi1_clk))
933 pwr->ebi1_clk = NULL;
934 else
935 clk_set_rate(pwr->ebi1_clk,
936 pwr->pwrlevels[pwr->active_pwrlevel].
937 bus_freq);
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600938 if (pdata->bus_scale_table != NULL) {
939 pwr->pcl = msm_bus_scale_register_client(pdata->
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700940 bus_scale_table);
941 if (!pwr->pcl) {
942 KGSL_PWR_ERR(device,
943 "msm_bus_scale_register_client failed: "
944 "id %d table %p", device->id,
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600945 pdata->bus_scale_table);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946 result = -EINVAL;
947 goto done;
948 }
949 }
950
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700952 pm_runtime_enable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700953 register_early_suspend(&device->display_off);
954 return result;
955
956clk_err:
957 result = PTR_ERR(clk);
958 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600959 clks[i].name, result);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960
961done:
962 return result;
963}
964
/*
 * kgsl_pwrctrl_close - undo kgsl_pwrctrl_init.
 *
 * Disables runtime PM and early suspend, then releases the bus-scale
 * client, regulators and clocks acquired at init time.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	/* Start at 1: grp_clks[0] may alias grp_clks[1] (see init), so
	 * putting it separately would double-release the same clock. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
1001
/*
 * kgsl_idle_check - workqueue handler run when the idle timer expires.
 *
 * With the device mutex held: if the GPU is ACTIVE or NAPping, ask the
 * pwrscale policy for input and try to put the device to sleep; when it
 * is still too busy, re-arm the idle timer and account the failed nap.
 * A HUNG or DUMP_AND_FT device only has its pending state request
 * cleared.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
						  idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: try again one timeout from now. */
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
			    UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
				    KGSL_STATE_DUMP_AND_FT)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
1034
1035void kgsl_timer(unsigned long data)
1036{
1037 struct kgsl_device *device = (struct kgsl_device *) data;
1038
1039 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +05301040 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -07001041 if (device->pwrctrl.restore_slumber ||
1042 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -07001043 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
1044 else
1045 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001046 /* Have work run in a non-interrupt context. */
1047 queue_work(device->work_queue, &device->idle_check_ws);
1048 }
1049}
1050
/*
 * kgsl_pre_hwaccess - make sure the hardware is awake before a register
 * access.  Caller must hold the device mutex (enforced by BUG_ON).
 *
 * Low-power states trigger a wake; SUSPEND blocks until the suspend
 * gate opens; INIT/HUNG/DUMP_AND_FT merely verify that clocks are on
 * and log an error otherwise.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_FT:
		/* In these states we cannot wake; clocks being on is the
		 * best we can check for a safe register access. */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
			     &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
				     "hw access while clocks off from state %d\n",
				     device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
			     device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
1083
1084void kgsl_check_suspended(struct kgsl_device *device)
1085{
1086 if (device->requested_state == KGSL_STATE_SUSPEND ||
1087 device->state == KGSL_STATE_SUSPEND) {
1088 mutex_unlock(&device->mutex);
1089 wait_for_completion(&device->hwaccess_gate);
1090 mutex_lock(&device->mutex);
Tarun Karrad20d71a2013-01-25 15:38:57 -08001091 } else if (device->state == KGSL_STATE_DUMP_AND_FT) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001092 mutex_unlock(&device->mutex);
Tarun Karrad20d71a2013-01-25 15:38:57 -08001093 wait_for_completion(&device->ft_gate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001094 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -07001095 } else if (device->state == KGSL_STATE_SLUMBER)
1096 kgsl_pwrctrl_wake(device);
1097}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001098
/*
 * _nap - transition from ACTIVE to NAP (clocks off, IRQ off, but AXI
 * and power rail left on for a fast wake).
 *
 * Returns -EBUSY if the GPU is not idle; already-napping/sleeping
 * states are accepted as a no-op, anything else just cancels the
 * pending state request.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
1121
/*
 * _sleep_accounting - close out the busy-time bookkeeping and notify
 * the pwrscale policy before the device goes to sleep.  Flushing the
 * busy counters must happen before the stats are reset.
 */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.clk_stats.start = ktime_set(0, 0);
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
1130
/*
 * _sleep - transition from ACTIVE or NAP to SLEEP: IRQ, AXI and core
 * clocks off, busy accounting flushed, DMA pm_qos constraint relaxed.
 *
 * Returns -EBUSY if the GPU is still busy; SLEEP/SLUMBER are accepted
 * as a no-op.  The off-sequence (irq -> axi -> accounting -> clk) is
 * ordered and must not be rearranged.
 */
static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pm_qos_req_dma,
				      PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}

	/* Schedule the MMU clocks off as well (ts 0 = immediately). */
	kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);

	return 0;
}
1163
/*
 * _slumber - deepest idle state: stop the GPU entirely (context saved,
 * core stopped) so that waking requires a full ->start().
 *
 * Returns -EBUSY if the GPU is still busy from ACTIVE; SLUMBER is a
 * no-op.  The idle timer is stopped before the core, since a timer
 * firing mid-stop would race with this sequence.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pm_qos_req_dma,
				      PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001193
1194/******************************************************************/
1195/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - dispatch the pending low-power state request to
 * the matching transition helper.  Caller must hold the device mutex.
 *
 * Returns 0 on success, -EBUSY if the GPU refused to idle, or -EINVAL
 * for an unrecognized request.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		/* NOTE(review): _sleep() already issues this same call on
		 * its exit path; presumably a harmless repeat - confirm. */
		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
			      device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -07001223
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224/******************************************************************/
1225/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake - bring the device back to ACTIVE from any
 * low-power state.  Caller must hold the device mutex.
 *
 * The switch intentionally cascades: SLUMBER needs a full ->start(),
 * SLEEP additionally restores AXI and pwrscale, NAP only needs clocks
 * and IRQ back on.  On a failed start the request is cancelled and the
 * device is left in SLUMBER.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
			  jiffies + device->pwrctrl.interval_timeout);
		pm_qos_update_request(&device->pm_qos_req_dma,
				      GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
1265
/*
 * kgsl_pwrctrl_enable - power up rail, clocks and AXI in that order.
 * The rail/clk ordering is platform-mandated; do not rearrange.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
1274
/*
 * kgsl_pwrctrl_disable - exact reverse of kgsl_pwrctrl_enable:
 * AXI off, clocks off, then the power rail last.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001283
/*
 * kgsl_pwrctrl_set_state - commit a completed power-state transition
 * and clear any pending request.  The trace fires before the store so
 * the tracepoint can still observe the previous state on the device.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
1291
/*
 * kgsl_pwrctrl_request_state - record the desired next power state.
 * Only genuine new requests are traced; KGSL_STATE_NONE (cancel) and
 * repeats of the current request are stored silently.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001299
1300const char *kgsl_pwrstate_to_str(unsigned int state)
1301{
1302 switch (state) {
1303 case KGSL_STATE_NONE:
1304 return "NONE";
1305 case KGSL_STATE_INIT:
1306 return "INIT";
1307 case KGSL_STATE_ACTIVE:
1308 return "ACTIVE";
1309 case KGSL_STATE_NAP:
1310 return "NAP";
1311 case KGSL_STATE_SLEEP:
1312 return "SLEEP";
1313 case KGSL_STATE_SUSPEND:
1314 return "SUSPEND";
1315 case KGSL_STATE_HUNG:
1316 return "HUNG";
Tarun Karrad20d71a2013-01-25 15:38:57 -08001317 case KGSL_STATE_DUMP_AND_FT:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001318 return "DNR";
1319 case KGSL_STATE_SLUMBER:
1320 return "SLUMBER";
1321 default:
1322 break;
1323 }
1324 return "UNKNOWN";
1325}
1326EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1327