blob: 64397e06ca60d93aa8efc81fdad9b17babeaa8e4 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
16
17#include "kgsl.h"
18#include "kgsl_pwrscale.h"
19#include "kgsl_device.h"
20
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060021#define KGSL_PWRFLAGS_POWER_ON 0
22#define KGSL_PWRFLAGS_CLK_ON 1
23#define KGSL_PWRFLAGS_AXI_ON 2
24#define KGSL_PWRFLAGS_IRQ_ON 3
25
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070027#define UPDATE_BUSY_VAL 1000000
28#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
/*
 * kgsl_pwrctrl_pwrlevel_change - switch the GPU to a new power level.
 * @device: the kgsl device
 * @new_level: index into pwr->pwrlevels (lower index = higher frequency)
 *
 * The request is honoured only if it is in range, not hotter than the
 * thermal limit, and different from the current level.  Both the core
 * clock and the bus bandwidth vote are updated to the new level.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		/* Only touch the core clock when it is running, or when
		 * napping (the rate still matters on the next wake). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; otherwise fall back
			 * to a raw EBI1 clock rate request. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
					  pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
58
59static int __gpuclk_store(int max, struct device *dev,
60 struct device_attribute *attr,
61 const char *buf, size_t count)
62{ int ret, i, delta = 5000000;
63 unsigned long val;
64 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060065 struct kgsl_pwrctrl *pwr;
66
67 if (device == NULL)
68 return 0;
69 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
71 ret = sscanf(buf, "%ld", &val);
72 if (ret != 1)
73 return count;
74
75 mutex_lock(&device->mutex);
76 for (i = 0; i < pwr->num_pwrlevels; i++) {
77 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
78 if (max)
79 pwr->thermal_pwrlevel = i;
80 break;
81 }
82 }
83
84 if (i == pwr->num_pwrlevels)
85 goto done;
86
87 /*
88 * If the current or requested clock speed is greater than the
89 * thermal limit, bump down immediately.
90 */
91
92 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
93 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
94 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
95 else if (!max)
96 kgsl_pwrctrl_pwrlevel_change(device, i);
97
98done:
99 mutex_unlock(&device->mutex);
100 return count;
101}
102
103static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
104 struct device_attribute *attr,
105 const char *buf, size_t count)
106{
107 return __gpuclk_store(1, dev, attr, buf, count);
108}
109
110static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
111 struct device_attribute *attr,
112 char *buf)
113{
114 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600115 struct kgsl_pwrctrl *pwr;
116 if (device == NULL)
117 return 0;
118 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119 return snprintf(buf, PAGE_SIZE, "%d\n",
120 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
121}
122
123static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
124 struct device_attribute *attr,
125 const char *buf, size_t count)
126{
127 return __gpuclk_store(0, dev, attr, buf, count);
128}
129
130static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
131 struct device_attribute *attr,
132 char *buf)
133{
134 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600135 struct kgsl_pwrctrl *pwr;
136 if (device == NULL)
137 return 0;
138 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700139 return snprintf(buf, PAGE_SIZE, "%d\n",
140 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
141}
142
143static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
144 struct device_attribute *attr,
145 const char *buf, size_t count)
146{
147 char temp[20];
148 unsigned long val;
149 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600150 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700151 int rc;
152
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600153 if (device == NULL)
154 return 0;
155 pwr = &device->pwrctrl;
156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157 snprintf(temp, sizeof(temp), "%.*s",
158 (int)min(count, sizeof(temp) - 1), buf);
159 rc = strict_strtoul(temp, 0, &val);
160 if (rc)
161 return rc;
162
163 mutex_lock(&device->mutex);
164
165 if (val == 1)
166 pwr->nap_allowed = true;
167 else if (val == 0)
168 pwr->nap_allowed = false;
169
170 mutex_unlock(&device->mutex);
171
172 return count;
173}
174
175static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
176 struct device_attribute *attr,
177 char *buf)
178{
179 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600180 if (device == NULL)
181 return 0;
182 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700183}
184
185
186static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
187 struct device_attribute *attr,
188 const char *buf, size_t count)
189{
190 char temp[20];
191 unsigned long val;
192 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600193 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194 const long div = 1000/HZ;
195 static unsigned int org_interval_timeout = 1;
196 int rc;
197
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600198 if (device == NULL)
199 return 0;
200 pwr = &device->pwrctrl;
201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 snprintf(temp, sizeof(temp), "%.*s",
203 (int)min(count, sizeof(temp) - 1), buf);
204 rc = strict_strtoul(temp, 0, &val);
205 if (rc)
206 return rc;
207
208 if (org_interval_timeout == 1)
209 org_interval_timeout = pwr->interval_timeout;
210
211 mutex_lock(&device->mutex);
212
213 /* Let the timeout be requested in ms, but convert to jiffies. */
214 val /= div;
215 if (val >= org_interval_timeout)
216 pwr->interval_timeout = val;
217
218 mutex_unlock(&device->mutex);
219
220 return count;
221}
222
223static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
224 struct device_attribute *attr,
225 char *buf)
226{
227 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600228 if (device == NULL)
229 return 0;
230 return snprintf(buf, PAGE_SIZE, "%d\n",
231 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232}
233
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700234static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
235 struct device_attribute *attr,
236 char *buf)
237{
238 int ret;
239 struct kgsl_device *device = kgsl_device_from_dev(dev);
240 struct kgsl_busy *b = &device->pwrctrl.busy;
241 ret = snprintf(buf, 17, "%7d %7d\n",
242 b->on_time_old, b->time_old);
243 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
244 b->on_time_old = 0;
245 b->time_old = 0;
246 }
247 return ret;
248}
249
/* sysfs files exposed on the kgsl device node:
 *   gpuclk     - read/set the active GPU clock frequency
 *   max_gpuclk - read/set the thermal-limit frequency
 *   pwrnap     - enable/disable the NAP low-power state
 *   idle_timer - idle timeout (written in ms, shown in jiffies)
 *   gpubusy    - read-only busy/total time counters
 * NOTE(review): the handlers above return int while struct
 * device_attribute declares ssize_t - confirm this matches the kernel
 * headers this tree builds against. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

/* NULL-terminated list consumed by kgsl_pwrctrl_init_sysfs(). */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
267
/* Create the pwrctrl sysfs attribute files on the kgsl device node. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
272
/* Remove the pwrctrl sysfs attribute files again. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
277
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* First call: start the sampling window now. */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	/* Microseconds since the previous sample. */
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	/* on_time=true means the GPU was powered during this interval. */
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	/* Begin the next sampling interval. */
	do_gettimeofday(&(b->start));
}
302
/*
 * kgsl_pwrctrl_clk - gate the GPU core clocks on or off.
 * The CLK_ON flag keeps enable/disable calls balanced; the call is a
 * no-op if the clocks are already in the requested state.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			/* Disable in reverse order; grp_clks[0] (the rate
			 * source) is skipped - only its rate is lowered. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* Drop to the lowest table rate unless we are only
			 * napping (NAP keeps the active rate for fast wake). */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			/* Restore the active rate before enabling, unless
			 * waking from NAP where it was never lowered. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
344
/*
 * kgsl_pwrctrl_axi - gate the AXI/memory-bus bandwidth vote on or off.
 * Off: drop the EBI1 rate to 0 and disable the clock, and zero the
 * bus-scale vote.  On: reverse the sequence at the active level's
 * bus_freq.  The AXI_ON flag keeps the calls balanced.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
381
382
383void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
384{
385 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
386
387 if (state == KGSL_PWRFLAGS_OFF) {
388 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
389 &pwr->power_flags)) {
390 KGSL_PWR_INFO(device,
391 "power off, device %d\n", device->id);
392 if (pwr->gpu_reg)
393 regulator_disable(pwr->gpu_reg);
394 }
395 } else if (state == KGSL_PWRFLAGS_ON) {
396 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
397 &pwr->power_flags)) {
398 KGSL_PWR_INFO(device,
399 "power on, device %d\n", device->id);
400 if (pwr->gpu_reg)
401 regulator_enable(pwr->gpu_reg);
402 }
403 }
404}
405EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
406
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line and the
 * device-level interrupt generation (ftbl->irqctrl).  The IRQ_ON flag
 * keeps enable/disable calls balanced.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			/* disable_irq() waits for running handlers, which
			 * would deadlock if called from the handler itself;
			 * use the nosync variant in interrupt context. */
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
433
434int kgsl_pwrctrl_init(struct kgsl_device *device)
435{
436 int i, result = 0;
437 struct clk *clk;
438 struct platform_device *pdev =
439 container_of(device->parentdev, struct platform_device, dev);
440 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
441 struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
442 struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
443 const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
444 pdata_dev->clk.name.clk,
445 pdata_dev->clk.name.pclk,
446 pdata_dev->imem_clk_name.clk,
447 pdata_dev->imem_clk_name.pclk};
448
449 /*acquire clocks */
450 for (i = 1; i < KGSL_MAX_CLKS; i++) {
451 if (clk_names[i]) {
452 clk = clk_get(&pdev->dev, clk_names[i]);
453 if (IS_ERR(clk))
454 goto clk_err;
455 pwr->grp_clks[i] = clk;
456 }
457 }
458 /* Make sure we have a source clk for freq setting */
459 clk = clk_get(&pdev->dev, clk_names[0]);
460 pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
461
462 /* put the AXI bus into asynchronous mode with the graphics cores */
463 if (pdata_pwr->set_grp_async != NULL)
464 pdata_pwr->set_grp_async();
465
466 if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
467 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
468 pdata_pwr->num_levels);
469 result = -EINVAL;
470 goto done;
471 }
472 pwr->num_pwrlevels = pdata_pwr->num_levels;
473 pwr->active_pwrlevel = pdata_pwr->init_level;
474 for (i = 0; i < pdata_pwr->num_levels; i++) {
475 pwr->pwrlevels[i].gpu_freq =
476 (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
477 clk_round_rate(pwr->grp_clks[0],
478 pdata_pwr->pwrlevel[i].
479 gpu_freq) : 0;
480 pwr->pwrlevels[i].bus_freq =
481 pdata_pwr->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600482 pwr->pwrlevels[i].io_fraction =
483 pdata_pwr->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700484 }
485 /* Do not set_rate for targets in sync with AXI */
486 if (pwr->pwrlevels[0].gpu_freq > 0)
487 clk_set_rate(pwr->grp_clks[0], pwr->
488 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
489
490 pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
491 if (IS_ERR(pwr->gpu_reg))
492 pwr->gpu_reg = NULL;
493
494 pwr->power_flags = 0;
495
496 pwr->nap_allowed = pdata_pwr->nap_allowed;
497 pwr->interval_timeout = pdata_pwr->idle_timeout;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700498 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700499 if (IS_ERR(pwr->ebi1_clk))
500 pwr->ebi1_clk = NULL;
501 else
502 clk_set_rate(pwr->ebi1_clk,
503 pwr->pwrlevels[pwr->active_pwrlevel].
504 bus_freq);
505 if (pdata_dev->clk.bus_scale_table != NULL) {
506 pwr->pcl =
507 msm_bus_scale_register_client(pdata_dev->clk.
508 bus_scale_table);
509 if (!pwr->pcl) {
510 KGSL_PWR_ERR(device,
511 "msm_bus_scale_register_client failed: "
512 "id %d table %p", device->id,
513 pdata_dev->clk.bus_scale_table);
514 result = -EINVAL;
515 goto done;
516 }
517 }
518
519 /*acquire interrupt */
520 pwr->interrupt_num =
521 platform_get_irq_byname(pdev, pwr->irq_name);
522
523 if (pwr->interrupt_num <= 0) {
524 KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
525 pwr->interrupt_num);
526 result = -EINVAL;
527 goto done;
528 }
529
530 register_early_suspend(&device->display_off);
531 return result;
532
533clk_err:
534 result = PTR_ERR(clk);
535 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
536 clk_names[i], result);
537
538done:
539 return result;
540}
541
/*
 * kgsl_pwrctrl_close - release everything kgsl_pwrctrl_init() acquired:
 * early-suspend handler, IRQ, EBI1 clock, bus-scale client, regulator
 * and the group clocks.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* grp_clks[0] may alias grp_clks[1] (see init), so it is only
	 * cleared below, never clk_put() separately. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
580
/*
 * kgsl_idle_check - workqueue handler scheduled by the idle timer (and
 * others) to try moving the device into a lower power state.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/* Let pwrscale adjust the level first, unless a deeper
		 * sleep has already been requested. */
		if ((device->requested_state != KGSL_STATE_SLEEP) &&
			(device->requested_state != KGSL_STATE_SLUMBER))
			kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: rearm the idle timer and try again. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery owns the device; drop any pending request. */
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}
611
612void kgsl_timer(unsigned long data)
613{
614 struct kgsl_device *device = (struct kgsl_device *) data;
615
616 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
617 if (device->requested_state != KGSL_STATE_SUSPEND) {
618 device->requested_state = KGSL_STATE_SLEEP;
619 /* Have work run in a non-interrupt context. */
620 queue_work(device->work_queue, &device->idle_check_ws);
621 }
622}
623
624void kgsl_pre_hwaccess(struct kgsl_device *device)
625{
626 BUG_ON(!mutex_is_locked(&device->mutex));
Suman Tatiraju24569022011-10-27 11:11:12 -0700627 if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP |
628 KGSL_STATE_SLUMBER))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700629 kgsl_pwrctrl_wake(device);
630}
631EXPORT_SYMBOL(kgsl_pre_hwaccess);
632
/*
 * kgsl_check_suspended - block until the device is usable again.
 * Drops the device mutex while waiting so the suspend/recovery path
 * can make progress, then re-acquires it.  A slumbering device is
 * simply woken.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
			device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700647
/*
 * _slumber - take the device into the deepest sleep state (SLUMBER).
 * The core is fully stopped; a later wake must go through
 * _wake_from_slumber() to restart it.
 */
static int
_slumber(struct kgsl_device *device)
{
	int status = -EINVAL;
	if (!device)
		return -EINVAL;
	KGSL_PWR_WARN(device, "Slumber start\n");

	device->requested_state = KGSL_STATE_SLUMBER;
	del_timer(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
		/* fallthrough - once idle, stop like NAP/SLEEP */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->state = KGSL_STATE_SLUMBER;
		device->pwrctrl.restore_slumber = 1;
		KGSL_PWR_WARN(device, "state -> SLUMBER, device %d\n",
					  device->id);
		break;
	default:
		/* Other states (e.g. already SLUMBER) need no work. */
		break;
	}
	status = 0;
	/* Don't set requested state to NONE
	   It's done in kgsl_pwrctrl_sleep*/
	KGSL_PWR_WARN(device, "Done going to slumber\n");
	return status;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700680
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - attempt the transition named in requested_state
 * (NAP, SLEEP or SLUMBER).  Returns 0 on success, -EBUSY if the GPU is
 * not idle enough; requested_state is cleared either way.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if ((device->requested_state == KGSL_STATE_NAP)) {
		/* After a slumber, NAP is not allowed until a full wake. */
		if (device->pwrctrl.restore_slumber) {
			device->requested_state = KGSL_STATE_NONE;
			return 0;
		} else if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl->isidle(device)) {
			if (!device->pwrctrl.restore_slumber)
				goto sleep;
			else
				goto slumber;
		}
	} else if (device->requested_state == KGSL_STATE_SLUMBER) {
		if (device->ftbl->isidle(device))
			goto slumber;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;


slumber:
	_slumber(device);
	/* NOTE(review): control intentionally falls through into the
	 * sleep path below so irq/axi/clocks are turned off and the
	 * clk_off bookkeeping runs with requested_state still SLUMBER. */

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	/* Park the core clock at the lowest table rate. */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
	kgsl_pwrctrl_busy_time(device, false);
	pwr->busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	/* NAP only masks the interrupt; clocks are gated below but the
	 * rate and AXI vote are preserved for a fast wake. */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
				PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
				  device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
745
Suman Tatiraju24569022011-10-27 11:11:12 -0700746static int
747_wake_from_slumber(struct kgsl_device *device)
748{
749 int status = -EINVAL;
750 if (!device)
751 return -EINVAL;
752
753 KGSL_PWR_WARN(device, "wake from slumber start\n");
754
755 device->requested_state = KGSL_STATE_ACTIVE;
756 kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
757 status = device->ftbl->start(device, 0);
758 device->requested_state = KGSL_STATE_NONE;
759
760 KGSL_PWR_WARN(device, "Done waking from slumber\n");
761 return status;
762}
763
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake - return the device to ACTIVE from NAP, SLEEP or
 * SLUMBER.  No-op while suspended.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	/* SLUMBER stopped the core entirely; restart it first. */
	if (device->state == KGSL_STATE_SLUMBER)
		_wake_from_slumber(device);

	/* NAP kept the AXI vote; only deeper states need it restored. */
	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
796
/* Full power-up: rail, then core clocks, then the AXI bus vote. */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
805
/* Full power-down: reverse of kgsl_pwrctrl_enable() - AXI vote, then
 * core clocks, then the rail. */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);