blob: 115f0732542430936ea263ee74d5d689d9243762 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
20
21#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070022#define UPDATE_BUSY_VAL 1000000
23#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024
/*
 * kgsl_pwrctrl_pwrlevel_change - switch the GPU to a new power level.
 * @device: KGSL device whose core clock and bus vote are adjusted
 * @new_level: index into pwr->pwrlevels (lower index = higher frequency,
 *             judging by the thermal-cap comparison below -- see
 *             __gpuclk_store's "bump down" logic)
 *
 * The request takes effect only when it lies inside the level table,
 * is no hotter than the thermal cap, and differs from the current level.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		/* Reprogram the core clock only while clocks are on, or
		 * in NAP where the rate is kept live for a fast wakeup. */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client for the bandwidth
			 * vote; fall back to direct EBI1 rate control. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
					  pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
53
54static int __gpuclk_store(int max, struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{ int ret, i, delta = 5000000;
58 unsigned long val;
59 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060060 struct kgsl_pwrctrl *pwr;
61
62 if (device == NULL)
63 return 0;
64 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065
66 ret = sscanf(buf, "%ld", &val);
67 if (ret != 1)
68 return count;
69
70 mutex_lock(&device->mutex);
71 for (i = 0; i < pwr->num_pwrlevels; i++) {
72 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
73 if (max)
74 pwr->thermal_pwrlevel = i;
75 break;
76 }
77 }
78
79 if (i == pwr->num_pwrlevels)
80 goto done;
81
82 /*
83 * If the current or requested clock speed is greater than the
84 * thermal limit, bump down immediately.
85 */
86
87 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
88 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
89 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
90 else if (!max)
91 kgsl_pwrctrl_pwrlevel_change(device, i);
92
93done:
94 mutex_unlock(&device->mutex);
95 return count;
96}
97
98static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
99 struct device_attribute *attr,
100 const char *buf, size_t count)
101{
102 return __gpuclk_store(1, dev, attr, buf, count);
103}
104
105static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
106 struct device_attribute *attr,
107 char *buf)
108{
109 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600110 struct kgsl_pwrctrl *pwr;
111 if (device == NULL)
112 return 0;
113 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700114 return snprintf(buf, PAGE_SIZE, "%d\n",
115 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
116}
117
118static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
119 struct device_attribute *attr,
120 const char *buf, size_t count)
121{
122 return __gpuclk_store(0, dev, attr, buf, count);
123}
124
125static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
126 struct device_attribute *attr,
127 char *buf)
128{
129 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600130 struct kgsl_pwrctrl *pwr;
131 if (device == NULL)
132 return 0;
133 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134 return snprintf(buf, PAGE_SIZE, "%d\n",
135 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
136}
137
138static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
139 struct device_attribute *attr,
140 const char *buf, size_t count)
141{
142 char temp[20];
143 unsigned long val;
144 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600145 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 int rc;
147
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600148 if (device == NULL)
149 return 0;
150 pwr = &device->pwrctrl;
151
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700152 snprintf(temp, sizeof(temp), "%.*s",
153 (int)min(count, sizeof(temp) - 1), buf);
154 rc = strict_strtoul(temp, 0, &val);
155 if (rc)
156 return rc;
157
158 mutex_lock(&device->mutex);
159
160 if (val == 1)
161 pwr->nap_allowed = true;
162 else if (val == 0)
163 pwr->nap_allowed = false;
164
165 mutex_unlock(&device->mutex);
166
167 return count;
168}
169
170static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
171 struct device_attribute *attr,
172 char *buf)
173{
174 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600175 if (device == NULL)
176 return 0;
177 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178}
179
180
/*
 * kgsl_pwrctrl_idle_timer_store - sysfs store for the idle timeout.
 *
 * Accepts a timeout in milliseconds, converts it to jiffies, and
 * applies it only if it is no shorter than the timeout the driver was
 * originally configured with.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;	/* milliseconds per jiffy */
	/* Latches the configured timeout on first write.  The value 1
	 * doubles as the "not yet latched" sentinel, so an original
	 * timeout of exactly 1 would be re-latched on every write. */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded copy keeps temp NUL-terminated for strict_strtoul(). */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	/* NOTE(review): this read/latch happens before device->mutex is
	 * taken, so concurrent first writes could race -- confirm. */
	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
217
218static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
219 struct device_attribute *attr,
220 char *buf)
221{
222 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600223 if (device == NULL)
224 return 0;
225 return snprintf(buf, PAGE_SIZE, "%d\n",
226 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227}
228
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700229static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
230 struct device_attribute *attr,
231 char *buf)
232{
233 int ret;
234 struct kgsl_device *device = kgsl_device_from_dev(dev);
235 struct kgsl_busy *b = &device->pwrctrl.busy;
236 ret = snprintf(buf, 17, "%7d %7d\n",
237 b->on_time_old, b->time_old);
238 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
239 b->on_time_old = 0;
240 b->time_old = 0;
241 }
242 return ret;
243}
244
/* sysfs attributes exposed on the KGSL device.  NOTE(review): gpubusy
 * is mode 0644 but has no store handler -- 0444 looks intended; confirm
 * before changing the visible permission. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700253
/* NULL-terminated attribute list consumed by kgsl_pwrctrl_init_sysfs()
 * and kgsl_pwrctrl_uninit_sysfs() below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
262
263int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
264{
265 return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
266}
267
268void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
269{
270 kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
271}
272
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* First sample ever: seed the window start so the initial
	 * elapsed interval is zero instead of garbage. */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	/* Microseconds since the previous sample. */
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters.  The AXI
	 * check publishes the final window when the bus is going down. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	/* The next interval starts now. */
	do_gettimeofday(&(b->start));
}
297
/*
 * kgsl_pwrctrl_clk - gate the GPU group clocks on or off.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * The CLK_ON bit in pwr->power_flags makes each transition one-shot:
 * only the first request in each direction does any work.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			/* Gate everything except grp_clks[0], the
			 * rate-setting source clock (see init). */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* Drop the source clock to the lowest table rate
			 * unless only napping, where the current rate is
			 * kept for a fast wakeup. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			/* Restore the active level's rate before enabling,
			 * except when waking from NAP (rate was kept). */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
339
/*
 * kgsl_pwrctrl_axi - raise or drop the GPU's AXI bus bandwidth vote.
 *
 * One-shot per direction via the AXI_ON flag bit.  Votes through the
 * msm bus-scale client when registered and through the EBI1 clock when
 * available (both may apply).
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			/* Zero the rate request before gating the clock. */
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			/* Ungate the clock, then restore the rate for the
			 * current power level. */
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
376
377
378void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
379{
380 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
381
382 if (state == KGSL_PWRFLAGS_OFF) {
383 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
384 &pwr->power_flags)) {
385 KGSL_PWR_INFO(device,
386 "power off, device %d\n", device->id);
387 if (pwr->gpu_reg)
388 regulator_disable(pwr->gpu_reg);
389 }
390 } else if (state == KGSL_PWRFLAGS_ON) {
391 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
392 &pwr->power_flags)) {
393 KGSL_PWR_INFO(device,
394 "power on, device %d\n", device->id);
395 if (pwr->gpu_reg)
396 regulator_enable(pwr->gpu_reg);
397 }
398 }
399}
400EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
401
402void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
403{
404 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
405
406 if (state == KGSL_PWRFLAGS_ON) {
407 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
408 &pwr->power_flags)) {
409 KGSL_PWR_INFO(device,
410 "irq on, device %d\n", device->id);
411 enable_irq(pwr->interrupt_num);
412 device->ftbl->irqctrl(device, 1);
413 }
414 } else if (state == KGSL_PWRFLAGS_OFF) {
415 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
416 &pwr->power_flags)) {
417 KGSL_PWR_INFO(device,
418 "irq off, device %d\n", device->id);
419 device->ftbl->irqctrl(device, 0);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600420 if (in_interrupt())
421 disable_irq_nosync(pwr->interrupt_num);
422 else
423 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700424 }
425 }
426}
427EXPORT_SYMBOL(kgsl_pwrctrl_irq);
428
429int kgsl_pwrctrl_init(struct kgsl_device *device)
430{
431 int i, result = 0;
432 struct clk *clk;
433 struct platform_device *pdev =
434 container_of(device->parentdev, struct platform_device, dev);
435 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
436 struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
437 struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
438 const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
439 pdata_dev->clk.name.clk,
440 pdata_dev->clk.name.pclk,
441 pdata_dev->imem_clk_name.clk,
442 pdata_dev->imem_clk_name.pclk};
443
444 /*acquire clocks */
445 for (i = 1; i < KGSL_MAX_CLKS; i++) {
446 if (clk_names[i]) {
447 clk = clk_get(&pdev->dev, clk_names[i]);
448 if (IS_ERR(clk))
449 goto clk_err;
450 pwr->grp_clks[i] = clk;
451 }
452 }
453 /* Make sure we have a source clk for freq setting */
454 clk = clk_get(&pdev->dev, clk_names[0]);
455 pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
456
457 /* put the AXI bus into asynchronous mode with the graphics cores */
458 if (pdata_pwr->set_grp_async != NULL)
459 pdata_pwr->set_grp_async();
460
461 if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
462 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
463 pdata_pwr->num_levels);
464 result = -EINVAL;
465 goto done;
466 }
467 pwr->num_pwrlevels = pdata_pwr->num_levels;
468 pwr->active_pwrlevel = pdata_pwr->init_level;
469 for (i = 0; i < pdata_pwr->num_levels; i++) {
470 pwr->pwrlevels[i].gpu_freq =
471 (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
472 clk_round_rate(pwr->grp_clks[0],
473 pdata_pwr->pwrlevel[i].
474 gpu_freq) : 0;
475 pwr->pwrlevels[i].bus_freq =
476 pdata_pwr->pwrlevel[i].bus_freq;
477 }
478 /* Do not set_rate for targets in sync with AXI */
479 if (pwr->pwrlevels[0].gpu_freq > 0)
480 clk_set_rate(pwr->grp_clks[0], pwr->
481 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
482
483 pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
484 if (IS_ERR(pwr->gpu_reg))
485 pwr->gpu_reg = NULL;
486
487 pwr->power_flags = 0;
488
489 pwr->nap_allowed = pdata_pwr->nap_allowed;
490 pwr->interval_timeout = pdata_pwr->idle_timeout;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700491 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700492 if (IS_ERR(pwr->ebi1_clk))
493 pwr->ebi1_clk = NULL;
494 else
495 clk_set_rate(pwr->ebi1_clk,
496 pwr->pwrlevels[pwr->active_pwrlevel].
497 bus_freq);
498 if (pdata_dev->clk.bus_scale_table != NULL) {
499 pwr->pcl =
500 msm_bus_scale_register_client(pdata_dev->clk.
501 bus_scale_table);
502 if (!pwr->pcl) {
503 KGSL_PWR_ERR(device,
504 "msm_bus_scale_register_client failed: "
505 "id %d table %p", device->id,
506 pdata_dev->clk.bus_scale_table);
507 result = -EINVAL;
508 goto done;
509 }
510 }
511
512 /*acquire interrupt */
513 pwr->interrupt_num =
514 platform_get_irq_byname(pdev, pwr->irq_name);
515
516 if (pwr->interrupt_num <= 0) {
517 KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
518 pwr->interrupt_num);
519 result = -EINVAL;
520 goto done;
521 }
522
523 register_early_suspend(&device->display_off);
524 return result;
525
526clk_err:
527 result = PTR_ERR(clk);
528 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
529 clk_names[i], result);
530
531done:
532 return result;
533}
534
/*
 * kgsl_pwrctrl_close - release everything acquired by kgsl_pwrctrl_init().
 *
 * Frees the irq, puts the clocks, unregisters the bus-scale client,
 * drops the regulator reference, and clears the power flags.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	/* NOTE(review): ebi1_clk may be NULL when init's clk_get failed;
	 * assumes clk_put(NULL) is tolerated on this platform -- confirm. */
	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* Only indices 1..N are put; NOTE(review): if init obtained a
	 * distinct source clock for index 0, its reference is not put
	 * here -- possible leak, confirm against init. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
573
/*
 * kgsl_idle_check - workqueue handler that tries to put an idle GPU
 * into the requested low-power state after the idle timer fires.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);

	mutex_lock(&device->mutex);
	/* Let the pwrscale policy react unless a full sleep has already
	 * been requested. */
	if (device->requested_state != KGSL_STATE_SLEEP)
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Device was too busy to sleep: re-arm the idle
			 * timer and try again later. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Hung/recovering devices just drop the pending request. */
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}
603
604void kgsl_timer(unsigned long data)
605{
606 struct kgsl_device *device = (struct kgsl_device *) data;
607
608 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
609 if (device->requested_state != KGSL_STATE_SUSPEND) {
610 device->requested_state = KGSL_STATE_SLEEP;
611 /* Have work run in a non-interrupt context. */
612 queue_work(device->work_queue, &device->idle_check_ws);
613 }
614}
615
616void kgsl_pre_hwaccess(struct kgsl_device *device)
617{
618 BUG_ON(!mutex_is_locked(&device->mutex));
619 if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
620 kgsl_pwrctrl_wake(device);
621}
622EXPORT_SYMBOL(kgsl_pre_hwaccess);
623
624void kgsl_check_suspended(struct kgsl_device *device)
625{
626 if (device->requested_state == KGSL_STATE_SUSPEND ||
627 device->state == KGSL_STATE_SUSPEND) {
628 mutex_unlock(&device->mutex);
629 wait_for_completion(&device->hwaccess_gate);
630 mutex_lock(&device->mutex);
631 }
632 if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
633 mutex_unlock(&device->mutex);
634 wait_for_completion(&device->recovery_gate);
635 mutex_lock(&device->mutex);
636 }
637 }
638
639
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - attempt the requested NAP or SLEEP transition.
 *
 * Returns 0 when the device reached the requested low-power state, or
 * -EBUSY when the hardware is not idle enough (the request is cleared
 * and the caller re-arms the idle timer).
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	/* Full sleep: irq and bus off, source clock dropped to the
	 * lowest table rate, busy statistics flushed. */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].
			gpu_freq);
	kgsl_pwrctrl_busy_time(device, false);
	pwr->busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	/* Nap: only the irq (here) and the core clocks (below) are
	 * gated; clock rates stay programmed for a fast wakeup. */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	/* Allow system suspend and relax the DMA latency vote. */
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
				PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
				  device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
690
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake - bring the device back to KGSL_STATE_ACTIVE.
 *
 * No-op while suspended.  Waking from full sleep restores the bus vote
 * and notifies the pwrscale policy; waking from NAP skips both since
 * they were never dropped.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		jiffies + device->pwrctrl.interval_timeout);

	/* Keep the system awake and cap DMA latency while the GPU runs. */
	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
720
/*
 * kgsl_pwrctrl_enable - power the device fully up: rail first, then
 * core clocks, then the bus vote (the inverse of kgsl_pwrctrl_disable).
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
729
/*
 * kgsl_pwrctrl_disable - power the device fully down: bus vote first,
 * then core clocks, then the rail (the inverse of kgsl_pwrctrl_enable).
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);