blob: 6f0cefec10fe0289984b968b20d904a65d69d1de [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060027#define KGSL_PWRFLAGS_POWER_ON 0
28#define KGSL_PWRFLAGS_CLK_ON 1
29#define KGSL_PWRFLAGS_AXI_ON 2
30#define KGSL_PWRFLAGS_IRQ_ON 3
31
Lucille Sylvester10297892012-02-27 13:54:47 -070032#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070033#define UPDATE_BUSY_VAL 1000000
34#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
/* Maps a platform-data clock-map bit to the clock name used for clk_get(). */
struct clk_pair {
	const char *name;
	uint map;
};

/*
 * Table of the clocks the GPU block may use; kgsl_pwrctrl_init() fills
 * pwr->grp_clks[] in this same order for each bit set in pdata->clk_map.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
63
/*
 * Close the current measurement window and book the elapsed time either
 * against the active clock level (on_time = true) or against the sleep
 * slot at index num_pwrlevels - 1 (on_time = false).  A fresh window is
 * started before returning.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	/* First ever call: open the window now so elapsed starts at ~0 */
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	clkstats->start = ktime_get();
}
87
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -070088/*
89 * Given a requested power level do bounds checking on the constraints and
90 * return the nearest possible level
91 */
92
93static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
94{
95 unsigned int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
96 pwr->max_pwrlevel);
97
98 unsigned int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
99 pwr->min_pwrlevel);
100
101 if (level < max_pwrlevel)
102 return max_pwrlevel;
103 if (level > min_pwrlevel)
104 return min_pwrlevel;
105
106 return level;
107}
108
/*
 * Move the GPU to a new power level, honoring the user/thermal constraints,
 * stepping the core clock one level at a time and re-voting the bus
 * bandwidth for the final level.  Caller is expected to hold device->mutex
 * (all sysfs callers in this file do) -- TODO confirm against other callers.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	int delta;
	int level;

	/* Adjust the power level to the current constraints */
	new_level = _adjust_pwrlevel(pwr, new_level);

	if (new_level == pwr->active_pwrlevel)
		return;

	/* Step direction: -1 raises the clock (lower index), +1 lowers it */
	delta = new_level < pwr->active_pwrlevel ? -1 : 1;

	/* Book the time spent at the old level before switching */
	update_clk_statistics(device, true);

	level = pwr->active_pwrlevel;

	/*
	 * Set the active powerlevel first in case the clocks are off - if we
	 * don't do this then the pwrlevel change won't take effect when the
	 * clocks come back
	 */

	pwr->active_pwrlevel = new_level;

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
		(device->state == KGSL_STATE_NAP)) {

		/*
		 * On some platforms, instability is caused on
		 * changing clock freq when the core is busy.
		 * Idle the gpu core before changing the clock freq.
		 */

		if (pwr->idle_needed == true)
			device->ftbl->idle(device);

		/*
		 * Don't shift by more than one level at a time to
		 * avoid glitches.
		 */

		while (level != new_level) {
			level += delta;

			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[level].gpu_freq);
		}
	}

	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	/* Re-vote bus bandwidth for the new level (bus client or EBI clock) */
	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {

		if (pwr->pcl)
			msm_bus_scale_client_update_request(pwr->pcl,
				pwrlevel->bus_freq);
		else if (pwr->ebi1_clk)
			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
	}

	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}

EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
177
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700178static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
179 struct device_attribute *attr,
180 const char *buf, size_t count)
181{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600183 struct kgsl_pwrctrl *pwr;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700184 int ret, level;
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600185
186 if (device == NULL)
187 return 0;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700188
189 pwr = &device->pwrctrl;
190
191 ret = sscanf(buf, "%d", &level);
192 if (ret != 1)
193 return count;
194
195 if (level < 0)
196 return count;
197
198 mutex_lock(&device->mutex);
199
200 if (level > pwr->num_pwrlevels - 2)
201 level = pwr->num_pwrlevels - 2;
202
203 pwr->thermal_pwrlevel = level;
204
205 /*
206 * If there is no power policy set the clock to the requested thermal
207 * level - if thermal now happens to be higher than max, then that will
208 * be limited by the pwrlevel change function. Otherwise if there is
209 * a policy only change the active clock if it is higher then the new
210 * thermal level
211 */
212
213 if (device->pwrscale.policy == NULL ||
214 pwr->thermal_pwrlevel > pwr->active_pwrlevel)
215 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
216
217 mutex_unlock(&device->mutex);
218
219 return count;
220}
221
222static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
223 struct device_attribute *attr,
224 char *buf)
225{
226
227 struct kgsl_device *device = kgsl_device_from_dev(dev);
228 struct kgsl_pwrctrl *pwr;
229 if (device == NULL)
230 return 0;
231 pwr = &device->pwrctrl;
232 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
233}
234
235static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
236 struct device_attribute *attr,
237 const char *buf, size_t count)
238{
239 struct kgsl_device *device = kgsl_device_from_dev(dev);
240 struct kgsl_pwrctrl *pwr;
241 int ret, level, max_level;
242
243 if (device == NULL)
244 return 0;
245
246 pwr = &device->pwrctrl;
247
248 ret = sscanf(buf, "%d", &level);
249 if (ret != 1)
250 return count;
251
252 /* If the use specifies a negative number, then don't change anything */
253 if (level < 0)
254 return count;
255
256 mutex_lock(&device->mutex);
257
258 /* You can't set a maximum power level lower than the minimum */
259 if (level > pwr->min_pwrlevel)
260 level = pwr->min_pwrlevel;
261
262 pwr->max_pwrlevel = level;
263
264
265 max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
266
267 /*
268 * If there is no policy then move to max by default. Otherwise only
269 * move max if the current level happens to be higher then the new max
270 */
271
272 if (device->pwrscale.policy == NULL ||
273 (max_level > pwr->active_pwrlevel))
274 kgsl_pwrctrl_pwrlevel_change(device, max_level);
275
276 mutex_unlock(&device->mutex);
277
278 return count;
279}
280
281static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
282 struct device_attribute *attr,
283 char *buf)
284{
285
286 struct kgsl_device *device = kgsl_device_from_dev(dev);
287 struct kgsl_pwrctrl *pwr;
288 if (device == NULL)
289 return 0;
290 pwr = &device->pwrctrl;
291 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
292}
293
294static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
295 struct device_attribute *attr,
296 const char *buf, size_t count)
297{ struct kgsl_device *device = kgsl_device_from_dev(dev);
298 struct kgsl_pwrctrl *pwr;
299 int ret, level, min_level;
300
301 if (device == NULL)
302 return 0;
303
304 pwr = &device->pwrctrl;
305
306 ret = sscanf(buf, "%d", &level);
307 if (ret != 1)
308 return count;
309
310 /* Don't do anything on obviously incorrect values */
311 if (level < 0)
312 return count;
313
314 mutex_lock(&device->mutex);
315 if (level > pwr->num_pwrlevels - 2)
316 level = pwr->num_pwrlevels - 2;
317
318 /* You can't set a minimum power level lower than the maximum */
319 if (level < pwr->max_pwrlevel)
320 level = pwr->max_pwrlevel;
321
322 pwr->min_pwrlevel = level;
323
324 min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
325
326 /* Only move the power level higher if minimum is higher then the
327 * current level
328 */
329
330 if (min_level < pwr->active_pwrlevel)
331 kgsl_pwrctrl_pwrlevel_change(device, min_level);
332
333 mutex_unlock(&device->mutex);
334
335 return count;
336}
337
338static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
339 struct device_attribute *attr,
340 char *buf)
341{
342 struct kgsl_device *device = kgsl_device_from_dev(dev);
343 struct kgsl_pwrctrl *pwr;
344 if (device == NULL)
345 return 0;
346 pwr = &device->pwrctrl;
347 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
348}
349
350static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
351 struct device_attribute *attr,
352 char *buf)
353{
354
355 struct kgsl_device *device = kgsl_device_from_dev(dev);
356 struct kgsl_pwrctrl *pwr;
357 if (device == NULL)
358 return 0;
359 pwr = &device->pwrctrl;
360 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
361}
362
363/* Given a GPU clock value, return the nearest powerlevel */
364
365static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
366{
367 int i;
368
369 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
370 if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
371 return i;
372 }
373
374 return -ERANGE;
375}
376
377static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
378 struct device_attribute *attr,
379 const char *buf, size_t count)
380{
381 struct kgsl_device *device = kgsl_device_from_dev(dev);
382 struct kgsl_pwrctrl *pwr;
383 unsigned long val;
384 int ret, level;
385
386 if (device == NULL)
387 return 0;
388
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600389 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700390
391 ret = sscanf(buf, "%ld", &val);
392 if (ret != 1)
393 return count;
394
395 mutex_lock(&device->mutex);
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700396 level = _get_nearest_pwrlevel(pwr, val);
397 if (level < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398 goto done;
399
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700400 pwr->thermal_pwrlevel = level;
401
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402 /*
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700403 * if the thermal limit is lower than the current setting,
404 * move the speed down immediately
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700405 */
406
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700407 if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700408 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409
410done:
411 mutex_unlock(&device->mutex);
412 return count;
413}
414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
416 struct device_attribute *attr,
417 char *buf)
418{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700419
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700420 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600421 struct kgsl_pwrctrl *pwr;
422 if (device == NULL)
423 return 0;
424 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425 return snprintf(buf, PAGE_SIZE, "%d\n",
426 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
427}
428
429static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
430 struct device_attribute *attr,
431 const char *buf, size_t count)
432{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700433 struct kgsl_device *device = kgsl_device_from_dev(dev);
434 struct kgsl_pwrctrl *pwr;
435 unsigned long val;
436 int ret, level;
437
438 if (device == NULL)
439 return 0;
440
441 pwr = &device->pwrctrl;
442
443 ret = sscanf(buf, "%ld", &val);
444 if (ret != 1)
445 return count;
446
447 mutex_lock(&device->mutex);
448 level = _get_nearest_pwrlevel(pwr, val);
449 if (level >= 0)
450 kgsl_pwrctrl_pwrlevel_change(device, level);
451
452 mutex_unlock(&device->mutex);
453 return count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700454}
455
456static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
457 struct device_attribute *attr,
458 char *buf)
459{
460 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600461 struct kgsl_pwrctrl *pwr;
462 if (device == NULL)
463 return 0;
464 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465 return snprintf(buf, PAGE_SIZE, "%d\n",
466 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
467}
468
469static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
470 struct device_attribute *attr,
471 const char *buf, size_t count)
472{
473 char temp[20];
474 unsigned long val;
475 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600476 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700477 int rc;
478
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600479 if (device == NULL)
480 return 0;
481 pwr = &device->pwrctrl;
482
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700483 snprintf(temp, sizeof(temp), "%.*s",
484 (int)min(count, sizeof(temp) - 1), buf);
485 rc = strict_strtoul(temp, 0, &val);
486 if (rc)
487 return rc;
488
489 mutex_lock(&device->mutex);
490
491 if (val == 1)
492 pwr->nap_allowed = true;
493 else if (val == 0)
494 pwr->nap_allowed = false;
495
496 mutex_unlock(&device->mutex);
497
498 return count;
499}
500
501static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
502 struct device_attribute *attr,
503 char *buf)
504{
505 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600506 if (device == NULL)
507 return 0;
508 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700509}
510
511
/*
 * sysfs: set the idle timeout.  Input is in milliseconds and converted to
 * jiffies.  The first successful write latches the platform's original
 * timeout as a lower bound; later writes below that bound are ignored.
 * NOTE(review): org_interval_timeout is function-static, so the bound is
 * shared across all kgsl devices -- looks intentional for a single GPU,
 * confirm if multiple devices ever use this node.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded copy so strict_strtoul sees a NUL-terminated string */
	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	/* Latch the platform default on first use as the minimum allowed */
	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
548
549static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
550 struct device_attribute *attr,
551 char *buf)
552{
553 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600554 if (device == NULL)
555 return 0;
556 return snprintf(buf, PAGE_SIZE, "%d\n",
557 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558}
559
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700560static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
561 struct device_attribute *attr,
562 char *buf)
563{
564 int ret;
565 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800566 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
567 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
568 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700569 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800570 clkstats->on_time_old = 0;
571 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700572 }
573 return ret;
574}
575
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800576static int kgsl_pwrctrl_gputop_show(struct device *dev,
577 struct device_attribute *attr,
578 char *buf)
579{
580 int ret;
581 struct kgsl_device *device = kgsl_device_from_dev(dev);
582 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
583 int i = 0;
584 char *ptr = buf;
585
586 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
587 clkstats->elapsed_old);
588 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
589 i++, ptr += ret)
590 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
591 clkstats->old_clock_time[i]);
592
593 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
594 clkstats->on_time_old = 0;
595 clkstats->elapsed_old = 0;
596 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
597 clkstats->old_clock_time[i] = 0;
598 }
599 return (unsigned int) (ptr - buf);
600}
601
Anshuman Dani91ede1e2012-08-21 14:44:38 +0530602static int kgsl_pwrctrl_gpu_available_frequencies_show(
603 struct device *dev,
604 struct device_attribute *attr,
605 char *buf)
606{
607 struct kgsl_device *device = kgsl_device_from_dev(dev);
608 struct kgsl_pwrctrl *pwr;
609 int index, num_chars = 0;
610
611 if (device == NULL)
612 return 0;
613 pwr = &device->pwrctrl;
614 for (index = 0; index < pwr->num_pwrlevels - 1; index++)
615 num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
616 pwr->pwrlevels[index].gpu_freq);
617 buf[num_chars++] = '\n';
618 return num_chars;
619}
620
/*
 * sysfs attribute declarations.  0644/0664 nodes are writable controls,
 * 0444 nodes are read-only statistics; pwrnap additionally allows group
 * writes.
 */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
DEVICE_ATTR(max_pwrlevel, 0644,
	kgsl_pwrctrl_max_pwrlevel_show,
	kgsl_pwrctrl_max_pwrlevel_store);
DEVICE_ATTR(min_pwrlevel, 0644,
	kgsl_pwrctrl_min_pwrlevel_show,
	kgsl_pwrctrl_min_pwrlevel_store);
DEVICE_ATTR(thermal_pwrlevel, 0644,
	kgsl_pwrctrl_thermal_pwrlevel_show,
	kgsl_pwrctrl_thermal_pwrlevel_store);
DEVICE_ATTR(num_pwrlevels, 0444,
	kgsl_pwrctrl_num_pwrlevels_show,
	NULL);

/* NULL-terminated list consumed by kgsl_pwrctrl_init_sysfs() below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	&dev_attr_max_pwrlevel,
	&dev_attr_min_pwrlevel,
	&dev_attr_thermal_pwrlevel,
	&dev_attr_num_pwrlevels,
	NULL
};
661
/* Create the pwrctrl sysfs attribute files on the device node. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
666
/* Remove the pwrctrl sysfs attribute files created at init. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
671
/*
 * Snapshot the live per-pwrlevel counters into the *_old fields that the
 * gpubusy/gputop sysfs nodes report, then zero the live counters to start
 * a new accounting window.  The slot at index num_pwrlevels - 1 holds
 * sleep time and is excluded from the on_time total.
 */
static void update_statistics(struct kgsl_device *device)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	unsigned int on_time = 0;
	int i;
	int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
	/*PER CLK TIME*/
	for (i = 0; i < num_pwrlevels; i++) {
		clkstats->old_clock_time[i] = clkstats->clock_time[i];
		on_time += clkstats->clock_time[i];
		clkstats->clock_time[i] = 0;
	}
	/* Sleep slot: snapshotted but not added to on_time */
	clkstats->old_clock_time[num_pwrlevels] =
		clkstats->clock_time[num_pwrlevels];
	clkstats->clock_time[num_pwrlevels] = 0;
	clkstats->on_time_old = on_time;
	clkstats->elapsed_old = clkstats->elapsed;
	clkstats->elapsed = 0;
}
691
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	update_clk_statistics(device, on_time);
	/* Publish a new window once enough time (UPDATE_BUSY_VAL us) has
	 * accumulated, or immediately when the bus is going down. */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}
704
/*
 * Gate the GPU group clocks on or off.  On the off path the clocks are
 * only clk_disable()d for NAP (low latency resume); for deeper states the
 * core clock is dropped to the slowest level and the clocks are also
 * clk_unprepare()d (high latency).  The on path mirrors this: prepare and
 * set rate only when not resuming from NAP, then enable last so GPU
 * interrupts can fire.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/* Clocks already disabled (e.g. from NAP): finish the
			 * high latency maintenance for the deeper state. */
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if (device->state != KGSL_STATE_NAP) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);

				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
			}
			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700763
/*
 * Turn the GPU's bus (AXI) vote on or off, via either the dedicated EBI
 * clock or the msm_bus scaling client, guarded by the AXI_ON flag bit.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate request before gating */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700797
/*
 * Switch the GPU power rails.  Enable order is core (gpu_reg) then cx
 * (gpu_cx); disable order is the reverse.  Enable failures are logged
 * but not propagated -- callers have no error path here.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
							"core regulator_enable "
							"failed: %d\n",
							status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
							"cx regulator_enable "
							"failed: %d\n",
							status);
			}
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700834
/* Enable or disable the GPU interrupt line, guarded by the IRQ_ON bit. */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			/* disable_irq() waits for the handler to finish, so
			 * from interrupt context use the nosync variant to
			 * avoid deadlocking on our own handler */
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
857
858int kgsl_pwrctrl_init(struct kgsl_device *device)
859{
860 int i, result = 0;
861 struct clk *clk;
862 struct platform_device *pdev =
863 container_of(device->parentdev, struct platform_device, dev);
864 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600865 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866
867 /*acquire clocks */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600868 for (i = 0; i < KGSL_MAX_CLKS; i++) {
869 if (pdata->clk_map & clks[i].map) {
870 clk = clk_get(&pdev->dev, clks[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700871 if (IS_ERR(clk))
872 goto clk_err;
873 pwr->grp_clks[i] = clk;
874 }
875 }
876 /* Make sure we have a source clk for freq setting */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600877 if (pwr->grp_clks[0] == NULL)
878 pwr->grp_clks[0] = pwr->grp_clks[1];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700879
880 /* put the AXI bus into asynchronous mode with the graphics cores */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600881 if (pdata->set_grp_async != NULL)
882 pdata->set_grp_async();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600884 if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700885 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600886 pdata->num_levels);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700887 result = -EINVAL;
888 goto done;
889 }
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600890 pwr->num_pwrlevels = pdata->num_levels;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700891
892 /* Initialize the user and thermal clock constraints */
893
894 pwr->max_pwrlevel = 0;
895 pwr->min_pwrlevel = pdata->num_levels - 2;
896 pwr->thermal_pwrlevel = 0;
897
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600898 pwr->active_pwrlevel = pdata->init_level;
Lucille Sylvester67b4c532012-02-08 11:24:31 -0800899 pwr->default_pwrlevel = pdata->init_level;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600900 for (i = 0; i < pdata->num_levels; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 pwr->pwrlevels[i].gpu_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600902 (pdata->pwrlevel[i].gpu_freq > 0) ?
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700903 clk_round_rate(pwr->grp_clks[0],
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600904 pdata->pwrlevel[i].
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700905 gpu_freq) : 0;
906 pwr->pwrlevels[i].bus_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600907 pdata->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600908 pwr->pwrlevels[i].io_fraction =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600909 pdata->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700910 }
911 /* Do not set_rate for targets in sync with AXI */
912 if (pwr->pwrlevels[0].gpu_freq > 0)
913 clk_set_rate(pwr->grp_clks[0], pwr->
914 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
915
Matt Wagantalld6fbf232012-05-03 20:09:28 -0700916 pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700917 if (IS_ERR(pwr->gpu_reg))
918 pwr->gpu_reg = NULL;
919
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700920 if (pwr->gpu_reg) {
Pu Chen12053782012-07-24 17:04:27 -0700921 pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
922 if (IS_ERR(pwr->gpu_cx))
923 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700924 } else
Pu Chen12053782012-07-24 17:04:27 -0700925 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700926
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700927 pwr->power_flags = 0;
928
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600929 pwr->nap_allowed = pdata->nap_allowed;
Kedar Joshic11d0982012-02-07 10:59:49 +0530930 pwr->idle_needed = pdata->idle_needed;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600931 pwr->interval_timeout = pdata->idle_timeout;
Lynus Vazfe4bede2012-04-06 11:53:30 -0700932 pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700933 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700934 if (IS_ERR(pwr->ebi1_clk))
935 pwr->ebi1_clk = NULL;
936 else
937 clk_set_rate(pwr->ebi1_clk,
938 pwr->pwrlevels[pwr->active_pwrlevel].
939 bus_freq);
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600940 if (pdata->bus_scale_table != NULL) {
941 pwr->pcl = msm_bus_scale_register_client(pdata->
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700942 bus_scale_table);
943 if (!pwr->pcl) {
944 KGSL_PWR_ERR(device,
945 "msm_bus_scale_register_client failed: "
946 "id %d table %p", device->id,
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600947 pdata->bus_scale_table);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700948 result = -EINVAL;
949 goto done;
950 }
951 }
952
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700953
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700954 pm_runtime_enable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700955 register_early_suspend(&device->display_off);
956 return result;
957
958clk_err:
959 result = PTR_ERR(clk);
960 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600961 clks[i].name, result);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962
963done:
964 return result;
965}
966
967void kgsl_pwrctrl_close(struct kgsl_device *device)
968{
969 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
970 int i;
971
972 KGSL_PWR_INFO(device, "close device %d\n", device->id);
973
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700974 pm_runtime_disable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700975 unregister_early_suspend(&device->display_off);
976
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700977 clk_put(pwr->ebi1_clk);
978
979 if (pwr->pcl)
980 msm_bus_scale_unregister_client(pwr->pcl);
981
982 pwr->pcl = 0;
983
984 if (pwr->gpu_reg) {
985 regulator_put(pwr->gpu_reg);
986 pwr->gpu_reg = NULL;
987 }
988
Pu Chen12053782012-07-24 17:04:27 -0700989 if (pwr->gpu_cx) {
990 regulator_put(pwr->gpu_cx);
991 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700992 }
993
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700994 for (i = 1; i < KGSL_MAX_CLKS; i++)
995 if (pwr->grp_clks[i]) {
996 clk_put(pwr->grp_clks[i]);
997 pwr->grp_clks[i] = NULL;
998 }
999
1000 pwr->grp_clks[0] = NULL;
1001 pwr->power_flags = 0;
1002}
1003
/*
 * kgsl_idle_check() - workqueue handler run when the idle timer fires;
 * tries to drop the device into the requested low-power state.
 *
 * If the GPU is still busy the idle timer is re-armed and the busy
 * statistics are updated so sysfs readers see an accurate busy %.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/* Let the pwrscale policy observe the idle transition */
		kgsl_pwrscale_idle(device, 0);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: try again after another timeout */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
				UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* No transition possible while hung/recovering; drop the
		 * pending request */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
1036
1037void kgsl_timer(unsigned long data)
1038{
1039 struct kgsl_device *device = (struct kgsl_device *) data;
1040
1041 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +05301042 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -07001043 if (device->pwrctrl.restore_slumber ||
1044 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -07001045 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
1046 else
1047 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001048 /* Have work run in a non-interrupt context. */
1049 queue_work(device->work_queue, &device->idle_check_ws);
1050 }
1051}
1052
/*
 * kgsl_pre_hwaccess() - ensure the GPU is powered and clocked before a
 * hardware register access.  Wakes the device from NAP/SLEEP/SLUMBER,
 * blocks across SUSPEND, and complains if clocks are off in any other
 * state.  Caller must hold the device mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		/* Already powered and clocked */
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		/* May drop the mutex and block until resume completes */
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* Access is tolerated only if clocks happen to be on */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
				 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
1085
/*
 * kgsl_check_suspended() - block the caller until the hardware is
 * accessible again.  Waits on the hwaccess gate across suspend, on the
 * recovery gate during dump-and-recover, and wakes the device if it
 * has slumbered.  Called with the device mutex held; the mutex is
 * dropped while waiting and re-taken before returning.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
			device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001100
Suman Tatiraju24569022011-10-27 11:11:12 -07001101static int
Jeremy Gebben388c2972011-12-16 09:05:07 -07001102_nap(struct kgsl_device *device)
Suman Tatiraju24569022011-10-27 11:11:12 -07001103{
Suman Tatiraju24569022011-10-27 11:11:12 -07001104 switch (device->state) {
1105 case KGSL_STATE_ACTIVE:
Jeremy Gebben388c2972011-12-16 09:05:07 -07001106 if (!device->ftbl->isidle(device)) {
1107 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
1108 return -EBUSY;
1109 }
1110 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -06001111 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
1112 kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
Suman Tatiraju24569022011-10-27 11:11:12 -07001113 case KGSL_STATE_NAP:
1114 case KGSL_STATE_SLEEP:
Jeremy Gebben388c2972011-12-16 09:05:07 -07001115 case KGSL_STATE_SLUMBER:
Suman Tatiraju24569022011-10-27 11:11:12 -07001116 break;
1117 default:
Jeremy Gebben388c2972011-12-16 09:05:07 -07001118 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Suman Tatiraju24569022011-10-27 11:11:12 -07001119 break;
1120 }
Jeremy Gebben388c2972011-12-16 09:05:07 -07001121 return 0;
1122}
1123
1124static void
1125_sleep_accounting(struct kgsl_device *device)
1126{
1127 kgsl_pwrctrl_busy_time(device, false);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -08001128 device->pwrctrl.clk_stats.start = ktime_set(0, 0);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001129 device->pwrctrl.time = 0;
1130 kgsl_pwrscale_sleep(device);
1131}
1132
/*
 * _sleep() - transition the device into the SLEEP state: IRQ, AXI and
 * core clocks off, busy accounting closed out, and the DMA pm_qos
 * request relaxed to the default latency.  Returns 0 on success or
 * -EBUSY if the GPU is not idle.  Caller must hold the device mutex.
 */
static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		/* Already at least as deep as requested */
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
1162
/*
 * _slumber() - transition the device into the deepest low-power state:
 * stop the idle timer, suspend the active context, stop the hardware,
 * close out busy accounting and relax the DMA pm_qos request.
 * Returns 0 on success or -EBUSY if the GPU is not idle.
 * Caller must hold the device mutex.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		/* Nothing to do */
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001192
1193/******************************************************************/
1194/* Caller must hold the device mutex. */
1195int kgsl_pwrctrl_sleep(struct kgsl_device *device)
1196{
Jeremy Gebben388c2972011-12-16 09:05:07 -07001197 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
1199
1200 /* Work through the legal state transitions */
Jeremy Gebben388c2972011-12-16 09:05:07 -07001201 switch (device->requested_state) {
1202 case KGSL_STATE_NAP:
Jeremy Gebben388c2972011-12-16 09:05:07 -07001203 status = _nap(device);
1204 break;
1205 case KGSL_STATE_SLEEP:
Lucille Sylvestera985adf2012-01-16 11:11:55 -07001206 status = _sleep(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001207 break;
1208 case KGSL_STATE_SLUMBER:
1209 status = _slumber(device);
1210 break;
1211 default:
1212 KGSL_PWR_INFO(device, "bad state request 0x%x\n",
1213 device->requested_state);
1214 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
1215 status = -EINVAL;
1216 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001217 }
Suman Tatiraju24569022011-10-27 11:11:12 -07001218 return status;
1219}
Jeremy Gebben388c2972011-12-16 09:05:07 -07001220EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -07001221
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001222/******************************************************************/
1223/* Caller must hold the device mutex. */
1224void kgsl_pwrctrl_wake(struct kgsl_device *device)
1225{
Jeremy Gebben388c2972011-12-16 09:05:07 -07001226 int status;
1227 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
1228 switch (device->state) {
1229 case KGSL_STATE_SLUMBER:
1230 status = device->ftbl->start(device, 0);
1231 if (status) {
1232 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
1233 KGSL_DRV_ERR(device, "start failed %d\n", status);
1234 break;
1235 }
1236 /* fall through */
1237 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001238 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
1239 kgsl_pwrscale_wake(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001240 /* fall through */
1241 case KGSL_STATE_NAP:
1242 /* Turn on the core clocks */
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -06001243 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001244 /* Enable state before turning on irq */
1245 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
1246 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
1247 /* Re-enable HW access */
1248 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001249 jiffies + device->pwrctrl.interval_timeout);
Devin Kim66ad4c02012-09-21 20:28:50 -07001250 pm_qos_update_request(&device->pm_qos_req_dma,
1251 GPU_SWFI_LATENCY);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001252 case KGSL_STATE_ACTIVE:
Vinay Roy65c41b32012-11-25 00:48:38 +05301253 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001254 break;
1255 default:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001256 KGSL_PWR_WARN(device, "unhandled state %s\n",
1257 kgsl_pwrstate_to_str(device->state));
Jeremy Gebben388c2972011-12-16 09:05:07 -07001258 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
1259 break;
1260 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001261}
1262EXPORT_SYMBOL(kgsl_pwrctrl_wake);
1263
/*
 * kgsl_pwrctrl_enable() - power up the rail, core clocks and AXI in
 * that order.  The rail-before-clock sequence is platform mandated;
 * do not reorder.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
1272
/*
 * kgsl_pwrctrl_disable() - power down AXI, core clocks and the rail,
 * the exact reverse of kgsl_pwrctrl_enable().  The clock-before-rail
 * sequence is platform mandated; do not reorder.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001281
/*
 * kgsl_pwrctrl_set_state() - commit a power-state transition: trace
 * it, make it the current state and clear any pending request.  The
 * trace call precedes the assignment so the event carries the
 * transition, not the settled state.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
1289
1290void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
1291{
1292 if (state != KGSL_STATE_NONE && state != device->requested_state)
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001293 trace_kgsl_pwr_request_state(device, state);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001294 device->requested_state = state;
1295}
1296EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001297
1298const char *kgsl_pwrstate_to_str(unsigned int state)
1299{
1300 switch (state) {
1301 case KGSL_STATE_NONE:
1302 return "NONE";
1303 case KGSL_STATE_INIT:
1304 return "INIT";
1305 case KGSL_STATE_ACTIVE:
1306 return "ACTIVE";
1307 case KGSL_STATE_NAP:
1308 return "NAP";
1309 case KGSL_STATE_SLEEP:
1310 return "SLEEP";
1311 case KGSL_STATE_SUSPEND:
1312 return "SUSPEND";
1313 case KGSL_STATE_HUNG:
1314 return "HUNG";
1315 case KGSL_STATE_DUMP_AND_RECOVER:
1316 return "DNR";
1317 case KGSL_STATE_SLUMBER:
1318 return "SLUMBER";
1319 default:
1320 break;
1321 }
1322 return "UNKNOWN";
1323}
1324EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1325