/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"

#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3

#define UPDATE_BUSY_VAL		1000000
#define UPDATE_BUSY		50

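/*
 * Change the active GPU power level.  The new level is applied only if it
 * lies between the thermal cap and the lowest defined level and differs
 * from the current one; the core clock is re-rated when the clocks are on
 * (or the device is napping), and the AXI/bus vote is updated when AXI is on.
 */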
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
		}
		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
			      pwr->active_pwrlevel);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);

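/*
 * Common handler for the gpuclk and max_gpuclk sysfs stores.  The written
 * value (in Hz) is matched against the defined power levels within a 5 MHz
 * delta; with "max" set the matching level becomes the thermal cap,
 * otherwise it becomes the requested level.  If the current or requested
 * frequency exceeds the thermal cap, the level is bumped down immediately.
 */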
static int __gpuclk_store(int max, struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int ret, i, delta = 5000000;
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	ret = sscanf(buf, "%ld", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	for (i = 0; i < pwr->num_pwrlevels; i++) {
		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
			if (max)
				pwr->thermal_pwrlevel = i;
			break;
		}
	}

	if (i == pwr->num_pwrlevels)
		goto done;

	/*
	 * If the current or requested clock speed is greater than the
	 * thermal limit, bump down immediately.
	 */

	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
	else if (!max)
		kgsl_pwrctrl_pwrlevel_change(device, i);

done:
	mutex_unlock(&device->mutex);
	return count;
}

static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}

static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}

static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}

static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}

static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}


static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}

static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			device->pwrctrl.interval_timeout);
}

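/*
 * Report GPU busy statistics: two counters, the time the GPU was on and
 * the total elapsed time (both in microseconds) over the last sampling
 * window.  The old values are cleared once AXI is off so that stale data
 * is not reported again.
 */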
static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	int ret;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_busy *b = &device->pwrctrl.busy;
	ret = snprintf(buf, 17, "%7d %7d\n",
		       b->on_time_old, b->time_old);
	if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = 0;
		b->time_old = 0;
	}
	return ret;
}

DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};

int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

/* Track the amount of time the GPU is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
	    !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	do_gettimeofday(&(b->start));
}

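/*
 * Gate or ungate the GPU core clocks.  On the way down the clocks are
 * disabled and, unless a nap was requested, the core is parked at the
 * lowest defined frequency; on the way up the rate is restored to the
 * active power level before the clocks are enabled.  Busy-time accounting
 * is updated on each transition.
 */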
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks off, device %d\n", device->id);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"clocks on, device %d\n", device->id);

			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* As the last step, enable grp_clk so that
			 * the GPU interrupt can come through. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}

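/*
 * Turn the AXI/bus interface vote on or off.  This drops or restores the
 * ebi1 clock rate and the msm_bus bandwidth request according to the
 * active power level.
 */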
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi off, device %d\n", device->id);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"axi on, device %d\n", device->id);
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}

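/* Enable or disable the GPU power rail via its regulator, if one exists. */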
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power off, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"power on, device %d\n", device->id);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}

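/*
 * Enable or disable the KGSL interrupt.  The hardware-level interrupt
 * control in the device ftbl is toggled as well; disable_irq_nosync() is
 * used when called from interrupt context to avoid waiting on the
 * currently running handler.
 */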
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq on, device %d\n", device->id);
			enable_irq(pwr->interrupt_num);
			device->ftbl->irqctrl(device, 1);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			KGSL_PWR_INFO(device,
				"irq off, device %d\n", device->id);
			device->ftbl->irqctrl(device, 0);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

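/*
 * One-time power control setup: acquire the core, iface and imem clocks,
 * build the power level table from platform data (rounding each GPU
 * frequency to a rate the source clock supports), grab the regulator,
 * ebi1 clock and bus-scale client, and look up the IRQ by name.
 */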
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/* acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			     pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
			(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				       pdata_pwr->pwrlevel[i].gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata_pwr->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			     pwr->pwrlevels[pwr->active_pwrlevel].bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata_dev->clk.
							 bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				     "msm_bus_scale_register_client failed: "
				     "id %d table %p", device->id,
				     pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
			     pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		     clk_names[i], result);

done:
	return result;
}

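/* Release everything acquired in kgsl_pwrctrl_init: IRQ, clocks,
 * bus-scale client and regulator. */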
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}

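/*
 * Idle-check work handler, queued from the idle timer.  If the device is
 * active or napping it asks the pwrscale policy to idle (unless a sleep or
 * slumber is already requested) and then tries to put the device to sleep;
 * if that fails because the GPU is still busy, the idle timer is re-armed
 * and the no-nap count feeds back into the busy statistics.
 */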
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		if ((device->requested_state != KGSL_STATE_SLEEP) &&
		    (device->requested_state != KGSL_STATE_SLUMBER))
			kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		device->requested_state = KGSL_STATE_NONE;
	}

	mutex_unlock(&device->mutex);
}

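/* Idle timer callback: unless a suspend is already requested, ask for a
 * sleep and defer the state change to the work queue so it runs outside
 * interrupt context. */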
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		device->requested_state = KGSL_STATE_SLEEP;
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}

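/* Callers must hold the device mutex; wakes the device before register
 * access if it is sleeping, napping or slumbering. */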
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP |
				KGSL_STATE_SLUMBER))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);

void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
	    device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}

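/*
 * Drop the device into the SLUMBER state: wait for idle if it is active,
 * then suspend contexts and stop the hardware.  The requested state is
 * deliberately left for kgsl_pwrctrl_sleep() to clear.
 */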
static int
_slumber(struct kgsl_device *device)
{
	int status = -EINVAL;
	if (!device)
		return -EINVAL;
	KGSL_PWR_WARN(device, "Slumber start\n");

	device->requested_state = KGSL_STATE_SLUMBER;
	del_timer(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->state = KGSL_STATE_SLUMBER;
		device->pwrctrl.restore_slumber = 1;
		KGSL_PWR_WARN(device, "state -> SLUMBER, device %d\n",
			      device->id);
		break;
	default:
		break;
	}
	status = 0;
	/* Don't set requested state to NONE;
	 * it's done in kgsl_pwrctrl_sleep. */
	KGSL_PWR_WARN(device, "Done going to slumber\n");
	return status;
}

/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->pwrctrl.restore_slumber) {
			device->requested_state = KGSL_STATE_NONE;
			return 0;
		} else if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
		    device->ftbl->isidle(device)) {
			if (!device->pwrctrl.restore_slumber)
				goto sleep;
			else
				goto slumber;
		}
	} else if (device->requested_state == KGSL_STATE_SLUMBER) {
		if (device->state == KGSL_STATE_INIT)
			return 0;
		if (device->ftbl->isidle(device))
			goto slumber;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

slumber:
	_slumber(device);

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
	kgsl_pwrctrl_busy_time(device, false);
	pwr->busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	if (device->idle_wakelock.name)
		wake_unlock(&device->idle_wakelock);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
		      device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

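/* Bring the device back from SLUMBER: restore the nominal power level and
 * restart the hardware via the device ftbl. */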
static int
_wake_from_slumber(struct kgsl_device *device)
{
	int status = -EINVAL;
	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "wake from slumber start\n");

	device->requested_state = KGSL_STATE_ACTIVE;
	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
	status = device->ftbl->start(device, 0);
	device->requested_state = KGSL_STATE_NONE;

	KGSL_PWR_WARN(device, "Done waking from slumber\n");
	return status;
}

/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state & (KGSL_STATE_SUSPEND | KGSL_STATE_INIT))
		return;

	if (device->state == KGSL_STATE_SLUMBER)
		_wake_from_slumber(device);

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		  jiffies + device->pwrctrl.interval_timeout);

	if (device->idle_wakelock.name)
		wake_lock(&device->idle_wakelock);

	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);