blob: 7f0d30b452af21e87053cb4c8db90d142059c85f [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060027#define KGSL_PWRFLAGS_POWER_ON 0
28#define KGSL_PWRFLAGS_CLK_ON 1
29#define KGSL_PWRFLAGS_AXI_ON 2
30#define KGSL_PWRFLAGS_IRQ_ON 3
31
Lucille Sylvester10297892012-02-27 13:54:47 -070032#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070033#define UPDATE_BUSY_VAL 1000000
34#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
/*
 * Pairing of a clock consumer name with its bit position in the platform
 * data clk_map, used at init to decide which clocks to clk_get().
 */
struct clk_pair {
	const char *name;
	uint map;
};
40
/*
 * Table of every clock the GPU block may need; an entry is only acquired
 * in kgsl_pwrctrl_init() when the platform's clk_map has its bit set.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
63
/* Update the elapsed time at a particular clock level
 * if the device is active(on_time = true).Otherwise
 * store it as sleep time.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	/* First call ever: anchor the measurement window at "now" */
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	/* Active time is charged to the current level; idle time goes to
	 * the last bucket (num_pwrlevels - 1), which acts as sleep time. */
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	/* Restart the window for the next interval */
	clkstats->start = ktime_get();
}
87
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -070088/*
89 * Given a requested power level do bounds checking on the constraints and
90 * return the nearest possible level
91 */
92
93static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
94{
95 unsigned int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
96 pwr->max_pwrlevel);
97
98 unsigned int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
99 pwr->min_pwrlevel);
100
101 if (level < max_pwrlevel)
102 return max_pwrlevel;
103 if (level > min_pwrlevel)
104 return min_pwrlevel;
105
106 return level;
107}
108
/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level.
 * The request is first clamped to the user/thermal constraints, then the
 * core clock is ramped one level at a time and the bus vote is updated.
 * NOTE(review): if the clocks are off and the device is not in NAP, the
 * clock-stepping branch is skipped and active_pwrlevel is left unchanged,
 * so the request is effectively dropped — confirm that is intended.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pwrlevel;
	int delta;

	/* Adjust the power level to the current constraints */
	new_level = _adjust_pwrlevel(pwr, new_level);

	if (new_level == pwr->active_pwrlevel)
		return;

	/* Step direction: -1 raises performance (lower index), +1 lowers it */
	delta = new_level < pwr->active_pwrlevel ? -1 : 1;

	update_clk_statistics(device, true);

	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
		(device->state == KGSL_STATE_NAP)) {

		/*
		 * On some platforms, instability is caused on
		 * changing clock freq when the core is busy.
		 * Idle the gpu core before changing the clock freq.
		 */

		if (pwr->idle_needed == true)
			device->ftbl->idle(device);

		/*
		 * Don't shift by more than one level at a time to
		 * avoid glitches.
		 */

		while (pwr->active_pwrlevel != new_level) {
			pwr->active_pwrlevel += delta;

			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
		}
	}

	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];

	/* Scale the bus vote along with the core clock */
	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {

		if (pwr->pcl)
			msm_bus_scale_client_update_request(pwr->pcl,
				pwrlevel->bus_freq);
		else if (pwr->ebi1_clk)
			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
	}

	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
}

EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
166
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700167static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
168 struct device_attribute *attr,
169 const char *buf, size_t count)
170{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600172 struct kgsl_pwrctrl *pwr;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700173 int ret, level;
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600174
175 if (device == NULL)
176 return 0;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700177
178 pwr = &device->pwrctrl;
179
180 ret = sscanf(buf, "%d", &level);
181 if (ret != 1)
182 return count;
183
184 if (level < 0)
185 return count;
186
187 mutex_lock(&device->mutex);
188
189 if (level > pwr->num_pwrlevels - 2)
190 level = pwr->num_pwrlevels - 2;
191
192 pwr->thermal_pwrlevel = level;
193
194 /*
195 * If there is no power policy set the clock to the requested thermal
196 * level - if thermal now happens to be higher than max, then that will
197 * be limited by the pwrlevel change function. Otherwise if there is
198 * a policy only change the active clock if it is higher then the new
199 * thermal level
200 */
201
202 if (device->pwrscale.policy == NULL ||
203 pwr->thermal_pwrlevel > pwr->active_pwrlevel)
204 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
205
206 mutex_unlock(&device->mutex);
207
208 return count;
209}
210
211static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
212 struct device_attribute *attr,
213 char *buf)
214{
215
216 struct kgsl_device *device = kgsl_device_from_dev(dev);
217 struct kgsl_pwrctrl *pwr;
218 if (device == NULL)
219 return 0;
220 pwr = &device->pwrctrl;
221 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
222}
223
224static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count)
227{
228 struct kgsl_device *device = kgsl_device_from_dev(dev);
229 struct kgsl_pwrctrl *pwr;
230 int ret, level, max_level;
231
232 if (device == NULL)
233 return 0;
234
235 pwr = &device->pwrctrl;
236
237 ret = sscanf(buf, "%d", &level);
238 if (ret != 1)
239 return count;
240
241 /* If the use specifies a negative number, then don't change anything */
242 if (level < 0)
243 return count;
244
245 mutex_lock(&device->mutex);
246
247 /* You can't set a maximum power level lower than the minimum */
248 if (level > pwr->min_pwrlevel)
249 level = pwr->min_pwrlevel;
250
251 pwr->max_pwrlevel = level;
252
253
254 max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
255
256 /*
257 * If there is no policy then move to max by default. Otherwise only
258 * move max if the current level happens to be higher then the new max
259 */
260
261 if (device->pwrscale.policy == NULL ||
262 (max_level > pwr->active_pwrlevel))
263 kgsl_pwrctrl_pwrlevel_change(device, max_level);
264
265 mutex_unlock(&device->mutex);
266
267 return count;
268}
269
270static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
271 struct device_attribute *attr,
272 char *buf)
273{
274
275 struct kgsl_device *device = kgsl_device_from_dev(dev);
276 struct kgsl_pwrctrl *pwr;
277 if (device == NULL)
278 return 0;
279 pwr = &device->pwrctrl;
280 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
281}
282
283static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
284 struct device_attribute *attr,
285 const char *buf, size_t count)
286{ struct kgsl_device *device = kgsl_device_from_dev(dev);
287 struct kgsl_pwrctrl *pwr;
288 int ret, level, min_level;
289
290 if (device == NULL)
291 return 0;
292
293 pwr = &device->pwrctrl;
294
295 ret = sscanf(buf, "%d", &level);
296 if (ret != 1)
297 return count;
298
299 /* Don't do anything on obviously incorrect values */
300 if (level < 0)
301 return count;
302
303 mutex_lock(&device->mutex);
304 if (level > pwr->num_pwrlevels - 2)
305 level = pwr->num_pwrlevels - 2;
306
307 /* You can't set a minimum power level lower than the maximum */
308 if (level < pwr->max_pwrlevel)
309 level = pwr->max_pwrlevel;
310
311 pwr->min_pwrlevel = level;
312
313 min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
314
315 /* Only move the power level higher if minimum is higher then the
316 * current level
317 */
318
319 if (min_level < pwr->active_pwrlevel)
320 kgsl_pwrctrl_pwrlevel_change(device, min_level);
321
322 mutex_unlock(&device->mutex);
323
324 return count;
325}
326
327static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
328 struct device_attribute *attr,
329 char *buf)
330{
331 struct kgsl_device *device = kgsl_device_from_dev(dev);
332 struct kgsl_pwrctrl *pwr;
333 if (device == NULL)
334 return 0;
335 pwr = &device->pwrctrl;
336 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
337}
338
339static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
340 struct device_attribute *attr,
341 char *buf)
342{
343
344 struct kgsl_device *device = kgsl_device_from_dev(dev);
345 struct kgsl_pwrctrl *pwr;
346 if (device == NULL)
347 return 0;
348 pwr = &device->pwrctrl;
349 return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
350}
351
352/* Given a GPU clock value, return the nearest powerlevel */
353
354static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
355{
356 int i;
357
358 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
359 if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
360 return i;
361 }
362
363 return -ERANGE;
364}
365
366static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
367 struct device_attribute *attr,
368 const char *buf, size_t count)
369{
370 struct kgsl_device *device = kgsl_device_from_dev(dev);
371 struct kgsl_pwrctrl *pwr;
372 unsigned long val;
373 int ret, level;
374
375 if (device == NULL)
376 return 0;
377
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600378 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379
380 ret = sscanf(buf, "%ld", &val);
381 if (ret != 1)
382 return count;
383
384 mutex_lock(&device->mutex);
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700385 level = _get_nearest_pwrlevel(pwr, val);
386 if (level < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387 goto done;
388
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700389 pwr->thermal_pwrlevel = level;
390
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700391 /*
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700392 * if the thermal limit is lower than the current setting,
393 * move the speed down immediately
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700394 */
395
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700396 if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700397 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398
399done:
400 mutex_unlock(&device->mutex);
401 return count;
402}
403
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700404static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
405 struct device_attribute *attr,
406 char *buf)
407{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700408
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600410 struct kgsl_pwrctrl *pwr;
411 if (device == NULL)
412 return 0;
413 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700414 return snprintf(buf, PAGE_SIZE, "%d\n",
415 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
416}
417
418static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
419 struct device_attribute *attr,
420 const char *buf, size_t count)
421{
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700422 struct kgsl_device *device = kgsl_device_from_dev(dev);
423 struct kgsl_pwrctrl *pwr;
424 unsigned long val;
425 int ret, level;
426
427 if (device == NULL)
428 return 0;
429
430 pwr = &device->pwrctrl;
431
432 ret = sscanf(buf, "%ld", &val);
433 if (ret != 1)
434 return count;
435
436 mutex_lock(&device->mutex);
437 level = _get_nearest_pwrlevel(pwr, val);
438 if (level >= 0)
439 kgsl_pwrctrl_pwrlevel_change(device, level);
440
441 mutex_unlock(&device->mutex);
442 return count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700443}
444
445static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
446 struct device_attribute *attr,
447 char *buf)
448{
449 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600450 struct kgsl_pwrctrl *pwr;
451 if (device == NULL)
452 return 0;
453 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700454 return snprintf(buf, PAGE_SIZE, "%d\n",
455 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
456}
457
458static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
459 struct device_attribute *attr,
460 const char *buf, size_t count)
461{
462 char temp[20];
463 unsigned long val;
464 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600465 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700466 int rc;
467
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600468 if (device == NULL)
469 return 0;
470 pwr = &device->pwrctrl;
471
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700472 snprintf(temp, sizeof(temp), "%.*s",
473 (int)min(count, sizeof(temp) - 1), buf);
474 rc = strict_strtoul(temp, 0, &val);
475 if (rc)
476 return rc;
477
478 mutex_lock(&device->mutex);
479
480 if (val == 1)
481 pwr->nap_allowed = true;
482 else if (val == 0)
483 pwr->nap_allowed = false;
484
485 mutex_unlock(&device->mutex);
486
487 return count;
488}
489
490static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
491 struct device_attribute *attr,
492 char *buf)
493{
494 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600495 if (device == NULL)
496 return 0;
497 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498}
499
500
501static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
502 struct device_attribute *attr,
503 const char *buf, size_t count)
504{
505 char temp[20];
506 unsigned long val;
507 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600508 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700509 const long div = 1000/HZ;
510 static unsigned int org_interval_timeout = 1;
511 int rc;
512
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600513 if (device == NULL)
514 return 0;
515 pwr = &device->pwrctrl;
516
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700517 snprintf(temp, sizeof(temp), "%.*s",
518 (int)min(count, sizeof(temp) - 1), buf);
519 rc = strict_strtoul(temp, 0, &val);
520 if (rc)
521 return rc;
522
523 if (org_interval_timeout == 1)
524 org_interval_timeout = pwr->interval_timeout;
525
526 mutex_lock(&device->mutex);
527
528 /* Let the timeout be requested in ms, but convert to jiffies. */
529 val /= div;
530 if (val >= org_interval_timeout)
531 pwr->interval_timeout = val;
532
533 mutex_unlock(&device->mutex);
534
535 return count;
536}
537
538static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
539 struct device_attribute *attr,
540 char *buf)
541{
542 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600543 if (device == NULL)
544 return 0;
545 return snprintf(buf, PAGE_SIZE, "%d\n",
546 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700547}
548
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700549static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
550 struct device_attribute *attr,
551 char *buf)
552{
553 int ret;
554 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800555 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
556 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
557 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700558 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800559 clkstats->on_time_old = 0;
560 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700561 }
562 return ret;
563}
564
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800565static int kgsl_pwrctrl_gputop_show(struct device *dev,
566 struct device_attribute *attr,
567 char *buf)
568{
569 int ret;
570 struct kgsl_device *device = kgsl_device_from_dev(dev);
571 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
572 int i = 0;
573 char *ptr = buf;
574
575 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
576 clkstats->elapsed_old);
577 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
578 i++, ptr += ret)
579 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
580 clkstats->old_clock_time[i]);
581
582 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
583 clkstats->on_time_old = 0;
584 clkstats->elapsed_old = 0;
585 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
586 clkstats->old_clock_time[i] = 0;
587 }
588 return (unsigned int) (ptr - buf);
589}
590
Anshuman Dani91ede1e2012-08-21 14:44:38 +0530591static int kgsl_pwrctrl_gpu_available_frequencies_show(
592 struct device *dev,
593 struct device_attribute *attr,
594 char *buf)
595{
596 struct kgsl_device *device = kgsl_device_from_dev(dev);
597 struct kgsl_pwrctrl *pwr;
598 int index, num_chars = 0;
599
600 if (device == NULL)
601 return 0;
602 pwr = &device->pwrctrl;
603 for (index = 0; index < pwr->num_pwrlevels - 1; index++)
604 num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
605 pwr->pwrlevels[index].gpu_freq);
606 buf[num_chars++] = '\n';
607 return num_chars;
608}
609
/* sysfs attribute definitions. 0644/0664 nodes are writable controls,
 * 0444 nodes are read-only statistics/information. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
DEVICE_ATTR(max_pwrlevel, 0644,
	kgsl_pwrctrl_max_pwrlevel_show,
	kgsl_pwrctrl_max_pwrlevel_store);
DEVICE_ATTR(min_pwrlevel, 0644,
	kgsl_pwrctrl_min_pwrlevel_show,
	kgsl_pwrctrl_min_pwrlevel_store);
DEVICE_ATTR(thermal_pwrlevel, 0644,
	kgsl_pwrctrl_thermal_pwrlevel_show,
	kgsl_pwrctrl_thermal_pwrlevel_store);
DEVICE_ATTR(num_pwrlevels, 0444,
	kgsl_pwrctrl_num_pwrlevels_show,
	NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700635
/* NULL-terminated list of all power-control sysfs attributes; installed
 * and removed as a group by the init/uninit helpers below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	&dev_attr_max_pwrlevel,
	&dev_attr_min_pwrlevel,
	&dev_attr_thermal_pwrlevel,
	&dev_attr_num_pwrlevels,
	NULL
};
650
/* Create all power-control sysfs files under the device node. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
655
/* Remove the power-control sysfs files created at init. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
660
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800661static void update_statistics(struct kgsl_device *device)
662{
663 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
664 unsigned int on_time = 0;
665 int i;
666 int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
667 /*PER CLK TIME*/
668 for (i = 0; i < num_pwrlevels; i++) {
669 clkstats->old_clock_time[i] = clkstats->clock_time[i];
670 on_time += clkstats->clock_time[i];
671 clkstats->clock_time[i] = 0;
672 }
673 clkstats->old_clock_time[num_pwrlevels] =
674 clkstats->clock_time[num_pwrlevels];
675 clkstats->clock_time[num_pwrlevels] = 0;
676 clkstats->on_time_old = on_time;
677 clkstats->elapsed_old = clkstats->elapsed;
678 clkstats->elapsed = 0;
679}
680
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700681/* Track the amount of time the gpu is on vs the total system time. *
682 * Regularly update the percentage of busy time displayed by sysfs. */
683static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
684{
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800685 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
686 update_clk_statistics(device, on_time);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700687 /* Update the output regularly and reset the counters. */
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800688 if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700689 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800690 update_statistics(device);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700691 }
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700692}
693
/*
 * kgsl_pwrctrl_clk - gate the GPU group clocks on or off.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 * @requested_state: the device power state being entered; NAP keeps the
 *                   clocks prepared (low-latency resume), deeper states
 *                   also unprepare them and park the core at the lowest
 *                   frequency.
 * The prepare/enable and disable/unprepare ordering here is deliberate;
 * do not reorder.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Park the core clock at the slowest level
				 * before unpreparing for a deep sleep */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/* Clocks already off (e.g. NAP -> SLEEP): still do
			 * the high latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if (device->state != KGSL_STATE_NAP) {
				/* Coming from deep sleep: re-prepare and
				 * restore the active frequency first */
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);

				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
			}
			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700752
753void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
754{
755 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
756
757 if (state == KGSL_PWRFLAGS_OFF) {
758 if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
759 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700760 trace_kgsl_bus(device, state);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530761 if (pwr->ebi1_clk) {
762 clk_set_rate(pwr->ebi1_clk, 0);
Lucille Sylvester064d5982012-05-08 15:42:43 -0600763 clk_disable_unprepare(pwr->ebi1_clk);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530764 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700765 if (pwr->pcl)
766 msm_bus_scale_client_update_request(pwr->pcl,
767 0);
768 }
769 } else if (state == KGSL_PWRFLAGS_ON) {
770 if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
771 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700772 trace_kgsl_bus(device, state);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530773 if (pwr->ebi1_clk) {
Lucille Sylvester064d5982012-05-08 15:42:43 -0600774 clk_prepare_enable(pwr->ebi1_clk);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530775 clk_set_rate(pwr->ebi1_clk,
776 pwr->pwrlevels[pwr->active_pwrlevel].
777 bus_freq);
778 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700779 if (pwr->pcl)
780 msm_bus_scale_client_update_request(pwr->pcl,
781 pwr->pwrlevels[pwr->active_pwrlevel].
782 bus_freq);
783 }
784 }
785}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700786
787void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
788{
789 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
790
791 if (state == KGSL_PWRFLAGS_OFF) {
792 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
793 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700794 trace_kgsl_rail(device, state);
Pu Chen12053782012-07-24 17:04:27 -0700795 if (pwr->gpu_cx)
796 regulator_disable(pwr->gpu_cx);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700797 if (pwr->gpu_reg)
798 regulator_disable(pwr->gpu_reg);
799 }
800 } else if (state == KGSL_PWRFLAGS_ON) {
801 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
802 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700803 trace_kgsl_rail(device, state);
Shubhraprakash Dase86ba5c2012-04-04 18:03:27 -0600804 if (pwr->gpu_reg) {
805 int status = regulator_enable(pwr->gpu_reg);
806 if (status)
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700807 KGSL_DRV_ERR(device,
808 "core regulator_enable "
809 "failed: %d\n",
810 status);
811 }
Pu Chen12053782012-07-24 17:04:27 -0700812 if (pwr->gpu_cx) {
813 int status = regulator_enable(pwr->gpu_cx);
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700814 if (status)
815 KGSL_DRV_ERR(device,
816 "cx regulator_enable "
817 "failed: %d\n",
818 status);
Shubhraprakash Dase86ba5c2012-04-04 18:03:27 -0600819 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700820 }
821 }
822}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700823
824void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
825{
826 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
827
828 if (state == KGSL_PWRFLAGS_ON) {
829 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
830 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700831 trace_kgsl_irq(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 enable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833 }
834 } else if (state == KGSL_PWRFLAGS_OFF) {
835 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
836 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700837 trace_kgsl_irq(device, state);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600838 if (in_interrupt())
839 disable_irq_nosync(pwr->interrupt_num);
840 else
841 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700842 }
843 }
844}
845EXPORT_SYMBOL(kgsl_pwrctrl_irq);
846
847int kgsl_pwrctrl_init(struct kgsl_device *device)
848{
849 int i, result = 0;
850 struct clk *clk;
851 struct platform_device *pdev =
852 container_of(device->parentdev, struct platform_device, dev);
853 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600854 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855
856 /*acquire clocks */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600857 for (i = 0; i < KGSL_MAX_CLKS; i++) {
858 if (pdata->clk_map & clks[i].map) {
859 clk = clk_get(&pdev->dev, clks[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860 if (IS_ERR(clk))
861 goto clk_err;
862 pwr->grp_clks[i] = clk;
863 }
864 }
865 /* Make sure we have a source clk for freq setting */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600866 if (pwr->grp_clks[0] == NULL)
867 pwr->grp_clks[0] = pwr->grp_clks[1];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700868
869 /* put the AXI bus into asynchronous mode with the graphics cores */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600870 if (pdata->set_grp_async != NULL)
871 pdata->set_grp_async();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700872
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600873 if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700874 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600875 pdata->num_levels);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876 result = -EINVAL;
877 goto done;
878 }
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600879 pwr->num_pwrlevels = pdata->num_levels;
Jordan Crouse2ddfc8a2012-11-27 11:33:06 -0700880
881 /* Initialize the user and thermal clock constraints */
882
883 pwr->max_pwrlevel = 0;
884 pwr->min_pwrlevel = pdata->num_levels - 2;
885 pwr->thermal_pwrlevel = 0;
886
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600887 pwr->active_pwrlevel = pdata->init_level;
Lucille Sylvester67b4c532012-02-08 11:24:31 -0800888 pwr->default_pwrlevel = pdata->init_level;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600889 for (i = 0; i < pdata->num_levels; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700890 pwr->pwrlevels[i].gpu_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600891 (pdata->pwrlevel[i].gpu_freq > 0) ?
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 clk_round_rate(pwr->grp_clks[0],
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600893 pdata->pwrlevel[i].
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894 gpu_freq) : 0;
895 pwr->pwrlevels[i].bus_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600896 pdata->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600897 pwr->pwrlevels[i].io_fraction =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600898 pdata->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700899 }
900 /* Do not set_rate for targets in sync with AXI */
901 if (pwr->pwrlevels[0].gpu_freq > 0)
902 clk_set_rate(pwr->grp_clks[0], pwr->
903 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
904
Matt Wagantalld6fbf232012-05-03 20:09:28 -0700905 pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700906 if (IS_ERR(pwr->gpu_reg))
907 pwr->gpu_reg = NULL;
908
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700909 if (pwr->gpu_reg) {
Pu Chen12053782012-07-24 17:04:27 -0700910 pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
911 if (IS_ERR(pwr->gpu_cx))
912 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700913 } else
Pu Chen12053782012-07-24 17:04:27 -0700914 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700915
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700916 pwr->power_flags = 0;
917
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600918 pwr->nap_allowed = pdata->nap_allowed;
Kedar Joshic11d0982012-02-07 10:59:49 +0530919 pwr->idle_needed = pdata->idle_needed;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600920 pwr->interval_timeout = pdata->idle_timeout;
Lynus Vazfe4bede2012-04-06 11:53:30 -0700921 pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700922 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700923 if (IS_ERR(pwr->ebi1_clk))
924 pwr->ebi1_clk = NULL;
925 else
926 clk_set_rate(pwr->ebi1_clk,
927 pwr->pwrlevels[pwr->active_pwrlevel].
928 bus_freq);
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600929 if (pdata->bus_scale_table != NULL) {
930 pwr->pcl = msm_bus_scale_register_client(pdata->
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700931 bus_scale_table);
932 if (!pwr->pcl) {
933 KGSL_PWR_ERR(device,
934 "msm_bus_scale_register_client failed: "
935 "id %d table %p", device->id,
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600936 pdata->bus_scale_table);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 result = -EINVAL;
938 goto done;
939 }
940 }
941
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700942
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700943 pm_runtime_enable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700944 register_early_suspend(&device->display_off);
945 return result;
946
947clk_err:
948 result = PTR_ERR(clk);
949 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600950 clks[i].name, result);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951
952done:
953 return result;
954}
955
956void kgsl_pwrctrl_close(struct kgsl_device *device)
957{
958 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
959 int i;
960
961 KGSL_PWR_INFO(device, "close device %d\n", device->id);
962
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -0700963 pm_runtime_disable(device->parentdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700964 unregister_early_suspend(&device->display_off);
965
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700966 clk_put(pwr->ebi1_clk);
967
968 if (pwr->pcl)
969 msm_bus_scale_unregister_client(pwr->pcl);
970
971 pwr->pcl = 0;
972
973 if (pwr->gpu_reg) {
974 regulator_put(pwr->gpu_reg);
975 pwr->gpu_reg = NULL;
976 }
977
Pu Chen12053782012-07-24 17:04:27 -0700978 if (pwr->gpu_cx) {
979 regulator_put(pwr->gpu_cx);
980 pwr->gpu_cx = NULL;
Pu Chenfe0dd3a2012-06-01 14:39:08 -0700981 }
982
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700983 for (i = 1; i < KGSL_MAX_CLKS; i++)
984 if (pwr->grp_clks[i]) {
985 clk_put(pwr->grp_clks[i]);
986 pwr->grp_clks[i] = NULL;
987 }
988
989 pwr->grp_clks[0] = NULL;
990 pwr->power_flags = 0;
991}
992
/*
 * kgsl_idle_check() - workqueue handler that tries to drop an idle GPU
 * into a lower power state.
 *
 * Runs from device->idle_check_ws. If the device is ACTIVE or in NAP it
 * notifies pwrscale and attempts the currently requested sleep
 * transition via kgsl_pwrctrl_sleep(); when the GPU is still busy the
 * idle timer is re-armed and the busy statistics are refreshed so the
 * %-busy numbers stay accurate. In HUNG/DUMP_AND_RECOVER any pending
 * state request is cancelled.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device, 0);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: try again after another timeout */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
				UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery owns the device; drop any pending request */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
1025
1026void kgsl_timer(unsigned long data)
1027{
1028 struct kgsl_device *device = (struct kgsl_device *) data;
1029
1030 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Anoop Kumar Yerukala03ba25f2012-01-23 17:32:02 +05301031 if (device->requested_state != KGSL_STATE_SUSPEND) {
Lynus Vazfe4bede2012-04-06 11:53:30 -07001032 if (device->pwrctrl.restore_slumber ||
1033 device->pwrctrl.strtstp_sleepwake)
Lucille Sylvestera985adf2012-01-16 11:11:55 -07001034 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
1035 else
1036 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001037 /* Have work run in a non-interrupt context. */
1038 queue_work(device->work_queue, &device->idle_check_ws);
1039 }
1040}
1041
/*
 * kgsl_pre_hwaccess() - guarantee the hardware is accessible before a
 * register touch. Wakes the core from NAP/SLEEP/SLUMBER, or blocks
 * while a suspend/recovery is in flight. Caller must hold the device
 * mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		/* Already powered; nothing to do */
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		/* Bring clocks (and for slumber, the core) back up */
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		/* Blocks until the hwaccess gate is opened */
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* Legal only if the clocks happen to be on already */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
				device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
1074
/*
 * kgsl_check_suspended() - park the caller while the device is
 * suspending or recovering. Drops the device mutex around each wait so
 * the suspend/recovery path can make progress, then reacquires it. If
 * the device merely slumbered, wake it instead.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
		device->state == KGSL_STATE_SUSPEND) {
		/* Wait for suspend to finish; mutex released meanwhile */
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		/* Wait for recovery to complete */
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001089
/*
 * _nap() - transition ACTIVE -> NAP (core clocks gated, state retained).
 * Returns -EBUSY if the GPU is not idle yet; 0 otherwise, including
 * when the device is already at NAP or a deeper state.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			/* GPU still busy: cancel the request */
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* IRQs off before the clocks are gated */
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through - NAP or deeper is already a no-op */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
1112
1113static void
1114_sleep_accounting(struct kgsl_device *device)
1115{
1116 kgsl_pwrctrl_busy_time(device, false);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -08001117 device->pwrctrl.clk_stats.start = ktime_set(0, 0);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001118 device->pwrctrl.time = 0;
1119 kgsl_pwrscale_sleep(device);
1120}
1121
/*
 * _sleep() - transition ACTIVE/NAP -> SLEEP: IRQs, AXI and core clocks
 * off, busy accounting closed, PM QoS relaxed. Returns -EBUSY when the
 * GPU is not idle; 0 otherwise (already at SLEEP/SLUMBER is a no-op).
 */
static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		/* IRQs first, then AXI, then core clocks */
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		/* No latency constraint needed while asleep */
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
1151
/*
 * _slumber() - transition ACTIVE/NAP/SLEEP -> SLUMBER: the deepest
 * software state. Stops the idle timer, saves the GPU context and
 * fully stops the core; waking requires a full restart.
 * Returns -EBUSY when the GPU is not idle; 0 otherwise.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Timer must be dead before the core is stopped */
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001181
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep() - dispatch the pending low-power request
 * (NAP/SLEEP/SLUMBER) to the matching transition helper.
 * Returns 0 on success, -EBUSY if the GPU was not idle, or -EINVAL for
 * an unrecognized request (which is then cleared).
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -07001210
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake() - bring the device back to ACTIVE.
 * The switch cascades deliberately: SLUMBER needs a full restart, then
 * falls into the SLEEP path (AXI + pwrscale), which falls into the NAP
 * path (core clocks, IRQs, idle timer, PM QoS). Failure to restart from
 * SLUMBER aborts the wake and cancels the request.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		/* Full restart: the core was stopped in _slumber() */
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
				jiffies + device->pwrctrl.interval_timeout);
		/* Constrain DMA wakeup latency while the GPU is active */
		pm_qos_update_request(&device->pm_qos_req_dma,
					GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
1252
/*
 * kgsl_pwrctrl_enable() - unconditionally power the device up:
 * rail first, then core clocks, then AXI.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
1261
/*
 * kgsl_pwrctrl_disable() - unconditionally power the device down,
 * mirroring kgsl_pwrctrl_enable(): AXI off, then core clocks, then the
 * rail.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001270
/*
 * kgsl_pwrctrl_set_state() - commit a completed power transition:
 * emit the tracepoint, record the new state and clear any pending
 * request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
1278
/*
 * kgsl_pwrctrl_request_state() - record the desired next power state.
 * Traces only real changes (not NONE, not a repeat of the current
 * request); the transition itself happens later in the sleep/wake path.
 */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001286
1287const char *kgsl_pwrstate_to_str(unsigned int state)
1288{
1289 switch (state) {
1290 case KGSL_STATE_NONE:
1291 return "NONE";
1292 case KGSL_STATE_INIT:
1293 return "INIT";
1294 case KGSL_STATE_ACTIVE:
1295 return "ACTIVE";
1296 case KGSL_STATE_NAP:
1297 return "NAP";
1298 case KGSL_STATE_SLEEP:
1299 return "SLEEP";
1300 case KGSL_STATE_SUSPEND:
1301 return "SUSPEND";
1302 case KGSL_STATE_HUNG:
1303 return "HUNG";
1304 case KGSL_STATE_DUMP_AND_RECOVER:
1305 return "DNR";
1306 case KGSL_STATE_SLUMBER:
1307 return "SLUMBER";
1308 default:
1309 break;
1310 }
1311 return "UNKNOWN";
1312}
1313EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1314