blob: 84f2b338089d7446e707a5f8d7ded074d0f3373b [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
20
/* Bit positions inside kgsl_pwrctrl.power_flags, tested and flipped
 * atomically with test_and_{set,clear}_bit() throughout this file. */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3

/* DMA latency tolerance (us) advertised via pm_qos while the GPU is awake. */
#define GPU_SWFI_LATENCY	3
/* Busy-statistics window length in microseconds (1 second). */
#define UPDATE_BUSY_VAL		1000000
/* After this many failed sleep attempts, force a busy-stats refresh. */
#define UPDATE_BUSY		50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
/*
 * kgsl_pwrctrl_pwrlevel_change() - move the GPU to a new power level.
 * @device: KGSL device whose core clock and bus vote are adjusted
 * @new_level: index into pwr->pwrlevels
 *
 * The request is honored only when it is in range (strictly below the
 * lowest level, at or below the thermal cap) and actually differs from
 * the current level.  NOTE(review): callers in this file hold
 * device->mutex; confirm that is the intended locking contract.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		/* Only reprogram the core clock when it is on, or when
		 * napping (clock gated but rate still configurable). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; otherwise drive
			 * the EBI1 clock rate directly. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
					  pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
58
59static int __gpuclk_store(int max, struct device *dev,
60 struct device_attribute *attr,
61 const char *buf, size_t count)
62{ int ret, i, delta = 5000000;
63 unsigned long val;
64 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060065 struct kgsl_pwrctrl *pwr;
66
67 if (device == NULL)
68 return 0;
69 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
71 ret = sscanf(buf, "%ld", &val);
72 if (ret != 1)
73 return count;
74
75 mutex_lock(&device->mutex);
76 for (i = 0; i < pwr->num_pwrlevels; i++) {
77 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
78 if (max)
79 pwr->thermal_pwrlevel = i;
80 break;
81 }
82 }
83
84 if (i == pwr->num_pwrlevels)
85 goto done;
86
87 /*
88 * If the current or requested clock speed is greater than the
89 * thermal limit, bump down immediately.
90 */
91
92 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
93 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
94 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
95 else if (!max)
96 kgsl_pwrctrl_pwrlevel_change(device, i);
97
98done:
99 mutex_unlock(&device->mutex);
100 return count;
101}
102
103static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
104 struct device_attribute *attr,
105 const char *buf, size_t count)
106{
107 return __gpuclk_store(1, dev, attr, buf, count);
108}
109
110static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
111 struct device_attribute *attr,
112 char *buf)
113{
114 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600115 struct kgsl_pwrctrl *pwr;
116 if (device == NULL)
117 return 0;
118 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119 return snprintf(buf, PAGE_SIZE, "%d\n",
120 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
121}
122
123static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
124 struct device_attribute *attr,
125 const char *buf, size_t count)
126{
127 return __gpuclk_store(0, dev, attr, buf, count);
128}
129
130static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
131 struct device_attribute *attr,
132 char *buf)
133{
134 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600135 struct kgsl_pwrctrl *pwr;
136 if (device == NULL)
137 return 0;
138 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700139 return snprintf(buf, PAGE_SIZE, "%d\n",
140 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
141}
142
143static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
144 struct device_attribute *attr,
145 const char *buf, size_t count)
146{
147 char temp[20];
148 unsigned long val;
149 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600150 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700151 int rc;
152
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600153 if (device == NULL)
154 return 0;
155 pwr = &device->pwrctrl;
156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157 snprintf(temp, sizeof(temp), "%.*s",
158 (int)min(count, sizeof(temp) - 1), buf);
159 rc = strict_strtoul(temp, 0, &val);
160 if (rc)
161 return rc;
162
163 mutex_lock(&device->mutex);
164
165 if (val == 1)
166 pwr->nap_allowed = true;
167 else if (val == 0)
168 pwr->nap_allowed = false;
169
170 mutex_unlock(&device->mutex);
171
172 return count;
173}
174
175static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
176 struct device_attribute *attr,
177 char *buf)
178{
179 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600180 if (device == NULL)
181 return 0;
182 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700183}
184
185
/*
 * kgsl_pwrctrl_idle_timer_store() - sysfs store for idle_timer.
 *
 * Accepts an idle timeout in milliseconds, converts it to jiffies and
 * records it as the interval used when arming device->idle_timer.
 * Requests below the board's original timeout are ignored, so the
 * timer can never be made more aggressive than the platform default.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* Milliseconds per jiffy, for the ms -> jiffies conversion below. */
	const long div = 1000/HZ;
	/* Latches the board-default timeout on first write.  NOTE(review):
	 * this static is shared by all devices and is read before the
	 * mutex is taken -- benign only with a single GPU device; confirm. */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded, NUL-terminated copy so strict_strtoul never overruns. */
	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
222
223static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
224 struct device_attribute *attr,
225 char *buf)
226{
227 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600228 if (device == NULL)
229 return 0;
230 return snprintf(buf, PAGE_SIZE, "%d\n",
231 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232}
233
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700234static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
235 struct device_attribute *attr,
236 char *buf)
237{
238 int ret;
239 struct kgsl_device *device = kgsl_device_from_dev(dev);
240 struct kgsl_busy *b = &device->pwrctrl.busy;
241 ret = snprintf(buf, 17, "%7d %7d\n",
242 b->on_time_old, b->time_old);
243 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
244 b->on_time_old = 0;
245 b->time_old = 0;
246 }
247 return ret;
248}
249
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700250DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
251DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
252 kgsl_pwrctrl_max_gpuclk_store);
253DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
254DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
255 kgsl_pwrctrl_idle_timer_store);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700256DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
257 NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258
259static const struct device_attribute *pwrctrl_attr_list[] = {
260 &dev_attr_gpuclk,
261 &dev_attr_max_gpuclk,
262 &dev_attr_pwrnap,
263 &dev_attr_idle_timer,
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700264 &dev_attr_gpubusy,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 NULL
266};
267
268int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
269{
270 return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
271}
272
273void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
274{
275 kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
276}
277
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700278/* Track the amount of time the gpu is on vs the total system time. *
279 * Regularly update the percentage of busy time displayed by sysfs. */
280static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
281{
282 struct kgsl_busy *b = &device->pwrctrl.busy;
283 int elapsed;
284 if (b->start.tv_sec == 0)
285 do_gettimeofday(&(b->start));
286 do_gettimeofday(&(b->stop));
287 elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
288 elapsed += b->stop.tv_usec - b->start.tv_usec;
289 b->time += elapsed;
290 if (on_time)
291 b->on_time += elapsed;
292 /* Update the output regularly and reset the counters. */
293 if ((b->time > UPDATE_BUSY_VAL) ||
294 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
295 b->on_time_old = b->on_time;
296 b->time_old = b->time;
297 b->on_time = 0;
298 b->time = 0;
299 }
300 do_gettimeofday(&(b->start));
301}
302
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
304{
305 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
306 int i = 0;
307 if (state == KGSL_PWRFLAGS_OFF) {
308 if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
309 &pwr->power_flags)) {
310 KGSL_PWR_INFO(device,
311 "clocks off, device %d\n", device->id);
312 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
313 if (pwr->grp_clks[i])
314 clk_disable(pwr->grp_clks[i]);
315 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
316 (device->requested_state != KGSL_STATE_NAP))
317 clk_set_rate(pwr->grp_clks[0],
318 pwr->pwrlevels[pwr->num_pwrlevels - 1].
319 gpu_freq);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700320 kgsl_pwrctrl_busy_time(device, true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700321 }
322 } else if (state == KGSL_PWRFLAGS_ON) {
323 if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
324 &pwr->power_flags)) {
325 KGSL_PWR_INFO(device,
326 "clocks on, device %d\n", device->id);
327
328 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
329 (device->state != KGSL_STATE_NAP))
330 clk_set_rate(pwr->grp_clks[0],
331 pwr->pwrlevels[pwr->active_pwrlevel].
332 gpu_freq);
333
334 /* as last step, enable grp_clk
335 this is to let GPU interrupt to come */
336 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
337 if (pwr->grp_clks[i])
338 clk_enable(pwr->grp_clks[i]);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700339 kgsl_pwrctrl_busy_time(device, false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700340 }
341 }
342}
343EXPORT_SYMBOL(kgsl_pwrctrl_clk);
344
/*
 * kgsl_pwrctrl_axi() - raise or drop the GPU's AXI bus bandwidth vote.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the AXI_ON bit.  Two voting mechanisms are supported:
 * a direct EBI1 clock rate and/or an msm_bus scaling client; whichever
 * handles exist are updated.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			/* Drop the rate to zero before gating the clock. */
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			/* Ungate first, then restore the active level's
			 * bus rate (mirror of the off sequence above). */
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
381
382
383void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
384{
385 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
386
387 if (state == KGSL_PWRFLAGS_OFF) {
388 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
389 &pwr->power_flags)) {
390 KGSL_PWR_INFO(device,
391 "power off, device %d\n", device->id);
392 if (pwr->gpu_reg)
393 regulator_disable(pwr->gpu_reg);
394 }
395 } else if (state == KGSL_PWRFLAGS_ON) {
396 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
397 &pwr->power_flags)) {
398 KGSL_PWR_INFO(device,
399 "power on, device %d\n", device->id);
400 if (pwr->gpu_reg)
401 regulator_enable(pwr->gpu_reg);
402 }
403 }
404}
405EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
406
407void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
408{
409 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
410
411 if (state == KGSL_PWRFLAGS_ON) {
412 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
413 &pwr->power_flags)) {
414 KGSL_PWR_INFO(device,
415 "irq on, device %d\n", device->id);
416 enable_irq(pwr->interrupt_num);
417 device->ftbl->irqctrl(device, 1);
418 }
419 } else if (state == KGSL_PWRFLAGS_OFF) {
420 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
421 &pwr->power_flags)) {
422 KGSL_PWR_INFO(device,
423 "irq off, device %d\n", device->id);
424 device->ftbl->irqctrl(device, 0);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600425 if (in_interrupt())
426 disable_irq_nosync(pwr->interrupt_num);
427 else
428 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429 }
430 }
431}
432EXPORT_SYMBOL(kgsl_pwrctrl_irq);
433
/*
 * kgsl_pwrctrl_init() - one-time power-control setup for a KGSL device.
 *
 * Acquires the GPU clocks named by platform data, builds the power
 * level table (rounding frequencies through the source clock), grabs
 * the optional GPU regulator, sets up the bus vote (EBI1 clock and/or
 * msm_bus client), resolves the interrupt number and registers the
 * early-suspend handler.  Returns 0 or a negative errno.
 *
 * NOTE(review): the clk_err/done error paths return without releasing
 * clocks/regulator acquired earlier in this function -- presumably the
 * caller invokes kgsl_pwrctrl_close() on failure; confirm.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	/* Slot 0 is the rate-setting source clock; the rest are gates. */
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/*acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	/* Snap each requested frequency to what the clock can provide;
	 * a zero gpu_freq means "in sync with AXI, do not set rates". */
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata_pwr->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* The regulator is optional; absence is not an error. */
	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	/* The bus clock is likewise optional. */
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl =
			msm_bus_scale_register_client(pdata_dev->clk.
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/*acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
					 pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clk_names[i], result);

done:
	return result;
}
539
/*
 * kgsl_pwrctrl_close() - undo kgsl_pwrctrl_init().
 *
 * Releases the interrupt, bus handles, regulator and clocks, and
 * clears the power flags.  Safe against partially-initialized state
 * (NULL/zero handles are skipped).
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	/* NOTE(review): called without a NULL check -- relies on the
	 * platform clk_put() tolerating NULL; confirm on this tree. */
	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* Slot 0 may alias grp_clks[1] (see init), so it is only
	 * cleared here, never clk_put() a second time. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
578
/*
 * kgsl_idle_check() - workqueue handler that tries to idle the GPU.
 *
 * Queued from the idle timer (and other paths).  Gives the pwrscale
 * governor a chance to adjust levels, then attempts the requested
 * sleep/nap transition; if the GPU is still busy the idle timer is
 * re-armed and the failed-nap count feeds the busy statistics.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);

	mutex_lock(&device->mutex);
	/* Let the governor react only when truly idle and not already
	 * committed to a full sleep. */
	if (device->ftbl->isidle(device) &&
		(device->requested_state != KGSL_STATE_SLEEP))
		kgsl_pwrscale_idle(device);

	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: try again after another interval. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery owns the state machine; drop our request. */
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}
609
610void kgsl_timer(unsigned long data)
611{
612 struct kgsl_device *device = (struct kgsl_device *) data;
613
614 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
615 if (device->requested_state != KGSL_STATE_SUSPEND) {
616 device->requested_state = KGSL_STATE_SLEEP;
617 /* Have work run in a non-interrupt context. */
618 queue_work(device->work_queue, &device->idle_check_ws);
619 }
620}
621
622void kgsl_pre_hwaccess(struct kgsl_device *device)
623{
624 BUG_ON(!mutex_is_locked(&device->mutex));
625 if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
626 kgsl_pwrctrl_wake(device);
627}
628EXPORT_SYMBOL(kgsl_pre_hwaccess);
629
630void kgsl_check_suspended(struct kgsl_device *device)
631{
632 if (device->requested_state == KGSL_STATE_SUSPEND ||
633 device->state == KGSL_STATE_SUSPEND) {
634 mutex_unlock(&device->mutex);
635 wait_for_completion(&device->hwaccess_gate);
636 mutex_lock(&device->mutex);
637 }
638 if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
639 mutex_unlock(&device->mutex);
640 wait_for_completion(&device->recovery_gate);
641 mutex_lock(&device->mutex);
642 }
643 }
644
645
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep() - try to move the GPU into NAP or SLEEP.
 *
 * Honors device->requested_state: NAP gates clocks only; SLEEP also
 * drops the AXI vote, parks the core clock at the lowest level and
 * notifies the pwrscale governor.  Returns 0 on success or -EBUSY if
 * the GPU is not idle enough for the requested transition.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		/* NAP -> SLEEP is always legal; otherwise require idle. */
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
	/* Close the busy window and restart the statistics from scratch. */
	kgsl_pwrctrl_busy_time(device, false);
	pwr->busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	/* NAP only masks the interrupt and gates clocks below. */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	/* Allow the system to suspend and relax the DMA latency bound. */
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
				PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
				  device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
696
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake() - bring the GPU back to ACTIVE from NAP/SLEEP.
 *
 * No-op while suspended.  Waking from full SLEEP restores the AXI
 * vote and the governor first; waking from NAP skips both (they were
 * never dropped).  IRQs are enabled only after the state is ACTIVE.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
			jiffies + device->pwrctrl.interval_timeout);

	/* Keep the system awake and tighten the DMA latency bound. */
	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
726
727void kgsl_pwrctrl_enable(struct kgsl_device *device)
728{
729 /* Order pwrrail/clk sequence based upon platform */
730 kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
731 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
732 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
733}
734EXPORT_SYMBOL(kgsl_pwrctrl_enable);
735
736void kgsl_pwrctrl_disable(struct kgsl_device *device)
737{
738 /* Order pwrrail/clk sequence based upon platform */
739 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
740 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
741 kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
742}
743EXPORT_SYMBOL(kgsl_pwrctrl_disable);