blob: 420ac50ef4fedc864fe94dbeafd344b2f92a5f90 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
20
21#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070022#define UPDATE_BUSY_VAL 1000000
23#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024
/*
 * kgsl_pwrctrl_pwrlevel_change() - switch the GPU to a new power level.
 * @device: KGSL device whose core clock and bus vote are adjusted
 * @new_level: index into pwr->pwrlevels (lower index = higher frequency)
 *
 * The request is honoured only when it names a valid level, does not go
 * above the current thermal cap (thermal_pwrlevel), and actually differs
 * from the active level.  NOTE(review): callers appear to hold
 * device->mutex (the sysfs stores do) -- confirm for all call sites.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		/* Reprogram the core clock only while it is on, or while
		 * napping (clock gated but rate still settable). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the registered bus-scale client; fall back
			 * to setting the EBI1 clock rate directly. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
					  pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
53
54static int __gpuclk_store(int max, struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{ int ret, i, delta = 5000000;
58 unsigned long val;
59 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060060 struct kgsl_pwrctrl *pwr;
61
62 if (device == NULL)
63 return 0;
64 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065
66 ret = sscanf(buf, "%ld", &val);
67 if (ret != 1)
68 return count;
69
70 mutex_lock(&device->mutex);
71 for (i = 0; i < pwr->num_pwrlevels; i++) {
72 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
73 if (max)
74 pwr->thermal_pwrlevel = i;
75 break;
76 }
77 }
78
79 if (i == pwr->num_pwrlevels)
80 goto done;
81
82 /*
83 * If the current or requested clock speed is greater than the
84 * thermal limit, bump down immediately.
85 */
86
87 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
88 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
89 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
90 else if (!max)
91 kgsl_pwrctrl_pwrlevel_change(device, i);
92
93done:
94 mutex_unlock(&device->mutex);
95 return count;
96}
97
/* sysfs store for max_gpuclk: sets the thermal frequency cap. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
104
105static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
106 struct device_attribute *attr,
107 char *buf)
108{
109 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600110 struct kgsl_pwrctrl *pwr;
111 if (device == NULL)
112 return 0;
113 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700114 return snprintf(buf, PAGE_SIZE, "%d\n",
115 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
116}
117
/* sysfs store for gpuclk: requests an immediate power-level change. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
124
125static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
126 struct device_attribute *attr,
127 char *buf)
128{
129 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600130 struct kgsl_pwrctrl *pwr;
131 if (device == NULL)
132 return 0;
133 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134 return snprintf(buf, PAGE_SIZE, "%d\n",
135 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
136}
137
138static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
139 struct device_attribute *attr,
140 const char *buf, size_t count)
141{
142 char temp[20];
143 unsigned long val;
144 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600145 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 int rc;
147
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600148 if (device == NULL)
149 return 0;
150 pwr = &device->pwrctrl;
151
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700152 snprintf(temp, sizeof(temp), "%.*s",
153 (int)min(count, sizeof(temp) - 1), buf);
154 rc = strict_strtoul(temp, 0, &val);
155 if (rc)
156 return rc;
157
158 mutex_lock(&device->mutex);
159
160 if (val == 1)
161 pwr->nap_allowed = true;
162 else if (val == 0)
163 pwr->nap_allowed = false;
164
165 mutex_unlock(&device->mutex);
166
167 return count;
168}
169
170static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
171 struct device_attribute *attr,
172 char *buf)
173{
174 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600175 if (device == NULL)
176 return 0;
177 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178}
179
180
181static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
182 struct device_attribute *attr,
183 const char *buf, size_t count)
184{
185 char temp[20];
186 unsigned long val;
187 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600188 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700189 const long div = 1000/HZ;
190 static unsigned int org_interval_timeout = 1;
191 int rc;
192
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600193 if (device == NULL)
194 return 0;
195 pwr = &device->pwrctrl;
196
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700197 snprintf(temp, sizeof(temp), "%.*s",
198 (int)min(count, sizeof(temp) - 1), buf);
199 rc = strict_strtoul(temp, 0, &val);
200 if (rc)
201 return rc;
202
203 if (org_interval_timeout == 1)
204 org_interval_timeout = pwr->interval_timeout;
205
206 mutex_lock(&device->mutex);
207
208 /* Let the timeout be requested in ms, but convert to jiffies. */
209 val /= div;
210 if (val >= org_interval_timeout)
211 pwr->interval_timeout = val;
212
213 mutex_unlock(&device->mutex);
214
215 return count;
216}
217
218static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
219 struct device_attribute *attr,
220 char *buf)
221{
222 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600223 if (device == NULL)
224 return 0;
225 return snprintf(buf, PAGE_SIZE, "%d\n",
226 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227}
228
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700229static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
230 struct device_attribute *attr,
231 char *buf)
232{
233 int ret;
234 struct kgsl_device *device = kgsl_device_from_dev(dev);
235 struct kgsl_busy *b = &device->pwrctrl.busy;
236 ret = snprintf(buf, 17, "%7d %7d\n",
237 b->on_time_old, b->time_old);
238 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
239 b->on_time_old = 0;
240 b->time_old = 0;
241 }
242 return ret;
243}
244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
246DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
247 kgsl_pwrctrl_max_gpuclk_store);
248DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
249DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
250 kgsl_pwrctrl_idle_timer_store);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700251DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
252 NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700253
254static const struct device_attribute *pwrctrl_attr_list[] = {
255 &dev_attr_gpuclk,
256 &dev_attr_max_gpuclk,
257 &dev_attr_pwrnap,
258 &dev_attr_idle_timer,
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700259 &dev_attr_gpubusy,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 NULL
261};
262
/* Create the pwrctrl sysfs files for @device. Returns 0 or -errno. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
267
/* Remove the pwrctrl sysfs files created by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
272
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700273/* Track the amount of time the gpu is on vs the total system time. *
274 * Regularly update the percentage of busy time displayed by sysfs. */
275static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
276{
277 struct kgsl_busy *b = &device->pwrctrl.busy;
278 int elapsed;
279 if (b->start.tv_sec == 0)
280 do_gettimeofday(&(b->start));
281 do_gettimeofday(&(b->stop));
282 elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
283 elapsed += b->stop.tv_usec - b->start.tv_usec;
284 b->time += elapsed;
285 if (on_time)
286 b->on_time += elapsed;
287 /* Update the output regularly and reset the counters. */
288 if ((b->time > UPDATE_BUSY_VAL) ||
289 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
290 b->on_time_old = b->on_time;
291 b->time_old = b->time;
292 b->on_time = 0;
293 b->time = 0;
294 }
295 do_gettimeofday(&(b->start));
296}
297
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700298void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
299{
300 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
301 int i = 0;
302 if (state == KGSL_PWRFLAGS_OFF) {
303 if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
304 &pwr->power_flags)) {
305 KGSL_PWR_INFO(device,
306 "clocks off, device %d\n", device->id);
307 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
308 if (pwr->grp_clks[i])
309 clk_disable(pwr->grp_clks[i]);
310 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
311 (device->requested_state != KGSL_STATE_NAP))
312 clk_set_rate(pwr->grp_clks[0],
313 pwr->pwrlevels[pwr->num_pwrlevels - 1].
314 gpu_freq);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700315 kgsl_pwrctrl_busy_time(device, true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316 }
317 } else if (state == KGSL_PWRFLAGS_ON) {
318 if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
319 &pwr->power_flags)) {
320 KGSL_PWR_INFO(device,
321 "clocks on, device %d\n", device->id);
322
323 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
324 (device->state != KGSL_STATE_NAP))
325 clk_set_rate(pwr->grp_clks[0],
326 pwr->pwrlevels[pwr->active_pwrlevel].
327 gpu_freq);
328
329 /* as last step, enable grp_clk
330 this is to let GPU interrupt to come */
331 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
332 if (pwr->grp_clks[i])
333 clk_enable(pwr->grp_clks[i]);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700334 kgsl_pwrctrl_busy_time(device, false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700335 }
336 }
337}
338EXPORT_SYMBOL(kgsl_pwrctrl_clk);
339
/*
 * kgsl_pwrctrl_axi() - enable or disable the GPU's AXI bus bandwidth.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the AXI_ON bit.  Votes through the bus-scale client
 * when registered (0 = release bandwidth), and drives the EBI1 clock
 * when present.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_disable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk)
				clk_enable(pwr->ebi1_clk);
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
370
371
372void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
373{
374 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
375
376 if (state == KGSL_PWRFLAGS_OFF) {
377 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
378 &pwr->power_flags)) {
379 KGSL_PWR_INFO(device,
380 "power off, device %d\n", device->id);
381 if (pwr->gpu_reg)
382 regulator_disable(pwr->gpu_reg);
383 }
384 } else if (state == KGSL_PWRFLAGS_ON) {
385 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
386 &pwr->power_flags)) {
387 KGSL_PWR_INFO(device,
388 "power on, device %d\n", device->id);
389 if (pwr->gpu_reg)
390 regulator_enable(pwr->gpu_reg);
391 }
392 }
393}
394EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
395
/*
 * kgsl_pwrctrl_irq() - enable or disable the GPU interrupt line.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the IRQ_ON bit.  On the off path the device-level irq
 * control is dropped first, then the line itself.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			/* disable_irq() waits for in-flight handlers and may
			 * sleep, so use the nosync variant in irq context. */
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
422
423int kgsl_pwrctrl_init(struct kgsl_device *device)
424{
425 int i, result = 0;
426 struct clk *clk;
427 struct platform_device *pdev =
428 container_of(device->parentdev, struct platform_device, dev);
429 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
430 struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
431 struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
432 const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
433 pdata_dev->clk.name.clk,
434 pdata_dev->clk.name.pclk,
435 pdata_dev->imem_clk_name.clk,
436 pdata_dev->imem_clk_name.pclk};
437
438 /*acquire clocks */
439 for (i = 1; i < KGSL_MAX_CLKS; i++) {
440 if (clk_names[i]) {
441 clk = clk_get(&pdev->dev, clk_names[i]);
442 if (IS_ERR(clk))
443 goto clk_err;
444 pwr->grp_clks[i] = clk;
445 }
446 }
447 /* Make sure we have a source clk for freq setting */
448 clk = clk_get(&pdev->dev, clk_names[0]);
449 pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
450
451 /* put the AXI bus into asynchronous mode with the graphics cores */
452 if (pdata_pwr->set_grp_async != NULL)
453 pdata_pwr->set_grp_async();
454
455 if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
456 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
457 pdata_pwr->num_levels);
458 result = -EINVAL;
459 goto done;
460 }
461 pwr->num_pwrlevels = pdata_pwr->num_levels;
462 pwr->active_pwrlevel = pdata_pwr->init_level;
463 for (i = 0; i < pdata_pwr->num_levels; i++) {
464 pwr->pwrlevels[i].gpu_freq =
465 (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
466 clk_round_rate(pwr->grp_clks[0],
467 pdata_pwr->pwrlevel[i].
468 gpu_freq) : 0;
469 pwr->pwrlevels[i].bus_freq =
470 pdata_pwr->pwrlevel[i].bus_freq;
471 }
472 /* Do not set_rate for targets in sync with AXI */
473 if (pwr->pwrlevels[0].gpu_freq > 0)
474 clk_set_rate(pwr->grp_clks[0], pwr->
475 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
476
477 pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
478 if (IS_ERR(pwr->gpu_reg))
479 pwr->gpu_reg = NULL;
480
481 pwr->power_flags = 0;
482
483 pwr->nap_allowed = pdata_pwr->nap_allowed;
484 pwr->interval_timeout = pdata_pwr->idle_timeout;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700485 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700486 if (IS_ERR(pwr->ebi1_clk))
487 pwr->ebi1_clk = NULL;
488 else
489 clk_set_rate(pwr->ebi1_clk,
490 pwr->pwrlevels[pwr->active_pwrlevel].
491 bus_freq);
492 if (pdata_dev->clk.bus_scale_table != NULL) {
493 pwr->pcl =
494 msm_bus_scale_register_client(pdata_dev->clk.
495 bus_scale_table);
496 if (!pwr->pcl) {
497 KGSL_PWR_ERR(device,
498 "msm_bus_scale_register_client failed: "
499 "id %d table %p", device->id,
500 pdata_dev->clk.bus_scale_table);
501 result = -EINVAL;
502 goto done;
503 }
504 }
505
506 /*acquire interrupt */
507 pwr->interrupt_num =
508 platform_get_irq_byname(pdev, pwr->irq_name);
509
510 if (pwr->interrupt_num <= 0) {
511 KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
512 pwr->interrupt_num);
513 result = -EINVAL;
514 goto done;
515 }
516
517 register_early_suspend(&device->display_off);
518 return result;
519
520clk_err:
521 result = PTR_ERR(clk);
522 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
523 clk_names[i], result);
524
525done:
526 return result;
527}
528
/*
 * kgsl_pwrctrl_close() - release everything kgsl_pwrctrl_init() acquired:
 * IRQ, EBI1 clock, bus-scale client, regulator, and group clocks.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	/* NOTE(review): ebi1_clk may be NULL (init sets it so on error);
	 * confirm this platform's clk_put() tolerates NULL. */
	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* grp_clks[0] may alias grp_clks[1] (see init), so it is not put
	 * separately; clear it after releasing indices 1..N. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
567
/*
 * kgsl_idle_check() - idle-timer work handler.  Attempts to move the
 * GPU into the requested low-power state; if it is still busy, re-arms
 * the idle timer and keeps the busy statistics honest.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->requested_state != KGSL_STATE_SLEEP)
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Too busy to sleep: try again after the timeout. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery owns the device; drop the pending request. */
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}
597
598void kgsl_timer(unsigned long data)
599{
600 struct kgsl_device *device = (struct kgsl_device *) data;
601
602 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
603 if (device->requested_state != KGSL_STATE_SUSPEND) {
604 device->requested_state = KGSL_STATE_SLEEP;
605 /* Have work run in a non-interrupt context. */
606 queue_work(device->work_queue, &device->idle_check_ws);
607 }
608}
609
/*
 * kgsl_pre_hwaccess() - ensure the GPU is awake before touching its
 * registers.  Caller must hold device->mutex (enforced by the BUG_ON).
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
617
/*
 * kgsl_check_suspended() - block until a pending suspend or
 * dump-and-recover completes.  Drops device->mutex while waiting, so
 * callers must re-validate any device state read before the call.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	}
	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	}
}
632
633
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep() - attempt the ACTIVE/NAP -> NAP/SLEEP transition
 * requested in device->requested_state.
 *
 * Returns 0 on success.  Returns -EBUSY (and clears the request) when
 * the GPU is not idle enough for the requested state.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		/* A napping GPU is already idle by definition. */
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	/* Park at the lowest level and reset the busy statistics. */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
				 pwr->pwrlevels[pwr->num_pwrlevels - 1].
				 gpu_freq);
	kgsl_pwrctrl_busy_time(device, false);
	pwr->busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	/* NAP keeps AXI and clock rates; only the irq and clocks gate. */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
				PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
				  device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
684
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake() - bring the GPU from NAP/SLEEP back to ACTIVE.
 * A suspended device is left alone; resume handles that path.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	/* Waking from NAP skips AXI/pwrscale: they were never dropped. */
	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
714
/*
 * kgsl_pwrctrl_enable() - full power-up: rail, then clocks, then AXI.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
723
/*
 * kgsl_pwrctrl_disable() - full power-down, the exact reverse of
 * kgsl_pwrctrl_enable(): AXI, then clocks, then the rail.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);