/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"

/* Bit positions in kgsl_pwrctrl.power_flags */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3

/* Publish/reset the busy counters after this many usecs accumulate */
#define UPDATE_BUSY_VAL		1000000
/* Refresh busy stats after this many consecutive failed sleep attempts */
#define UPDATE_BUSY		50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -060030struct clk_pair {
31 const char *name;
32 uint map;
33};
34
35struct clk_pair clks[KGSL_MAX_CLKS] = {
36 {
37 .name = "src_clk",
38 .map = KGSL_CLK_SRC,
39 },
40 {
41 .name = "core_clk",
42 .map = KGSL_CLK_CORE,
43 },
44 {
45 .name = "iface_clk",
46 .map = KGSL_CLK_IFACE,
47 },
48 {
49 .name = "mem_clk",
50 .map = KGSL_CLK_MEM,
51 },
52 {
53 .name = "mem_iface_clk",
54 .map = KGSL_CLK_MEM_IFACE,
55 },
56};
57
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
59 unsigned int new_level)
60{
61 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
62 if (new_level < (pwr->num_pwrlevels - 1) &&
63 new_level >= pwr->thermal_pwrlevel &&
64 new_level != pwr->active_pwrlevel) {
65 pwr->active_pwrlevel = new_level;
Lucille Sylvestercd42c822011-09-08 17:37:26 -060066 if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
67 (device->state == KGSL_STATE_NAP))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068 clk_set_rate(pwr->grp_clks[0],
69 pwr->pwrlevels[pwr->active_pwrlevel].
70 gpu_freq);
Lucille Sylvester622927a2011-08-10 14:42:25 -060071 if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072 if (pwr->pcl)
73 msm_bus_scale_client_update_request(pwr->pcl,
74 pwr->pwrlevels[pwr->active_pwrlevel].
75 bus_freq);
Lucille Sylvester622927a2011-08-10 14:42:25 -060076 else if (pwr->ebi1_clk)
77 clk_set_rate(pwr->ebi1_clk,
78 pwr->pwrlevels[pwr->active_pwrlevel].
79 bus_freq);
80 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
82 pwr->active_pwrlevel);
83 }
84}
85EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
86
87static int __gpuclk_store(int max, struct device *dev,
88 struct device_attribute *attr,
89 const char *buf, size_t count)
90{ int ret, i, delta = 5000000;
91 unsigned long val;
92 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060093 struct kgsl_pwrctrl *pwr;
94
95 if (device == NULL)
96 return 0;
97 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99 ret = sscanf(buf, "%ld", &val);
100 if (ret != 1)
101 return count;
102
103 mutex_lock(&device->mutex);
104 for (i = 0; i < pwr->num_pwrlevels; i++) {
105 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
106 if (max)
107 pwr->thermal_pwrlevel = i;
108 break;
109 }
110 }
111
112 if (i == pwr->num_pwrlevels)
113 goto done;
114
115 /*
116 * If the current or requested clock speed is greater than the
117 * thermal limit, bump down immediately.
118 */
119
120 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
121 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
122 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
123 else if (!max)
124 kgsl_pwrctrl_pwrlevel_change(device, i);
125
126done:
127 mutex_unlock(&device->mutex);
128 return count;
129}
130
131static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
132 struct device_attribute *attr,
133 const char *buf, size_t count)
134{
135 return __gpuclk_store(1, dev, attr, buf, count);
136}
137
138static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
139 struct device_attribute *attr,
140 char *buf)
141{
142 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600143 struct kgsl_pwrctrl *pwr;
144 if (device == NULL)
145 return 0;
146 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700147 return snprintf(buf, PAGE_SIZE, "%d\n",
148 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
149}
150
151static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
152 struct device_attribute *attr,
153 const char *buf, size_t count)
154{
155 return __gpuclk_store(0, dev, attr, buf, count);
156}
157
158static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
159 struct device_attribute *attr,
160 char *buf)
161{
162 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600163 struct kgsl_pwrctrl *pwr;
164 if (device == NULL)
165 return 0;
166 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700167 return snprintf(buf, PAGE_SIZE, "%d\n",
168 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
169}
170
171static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
172 struct device_attribute *attr,
173 const char *buf, size_t count)
174{
175 char temp[20];
176 unsigned long val;
177 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600178 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179 int rc;
180
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600181 if (device == NULL)
182 return 0;
183 pwr = &device->pwrctrl;
184
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185 snprintf(temp, sizeof(temp), "%.*s",
186 (int)min(count, sizeof(temp) - 1), buf);
187 rc = strict_strtoul(temp, 0, &val);
188 if (rc)
189 return rc;
190
191 mutex_lock(&device->mutex);
192
193 if (val == 1)
194 pwr->nap_allowed = true;
195 else if (val == 0)
196 pwr->nap_allowed = false;
197
198 mutex_unlock(&device->mutex);
199
200 return count;
201}
202
203static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
204 struct device_attribute *attr,
205 char *buf)
206{
207 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600208 if (device == NULL)
209 return 0;
210 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211}
212
213
214static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
215 struct device_attribute *attr,
216 const char *buf, size_t count)
217{
218 char temp[20];
219 unsigned long val;
220 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600221 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700222 const long div = 1000/HZ;
223 static unsigned int org_interval_timeout = 1;
224 int rc;
225
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600226 if (device == NULL)
227 return 0;
228 pwr = &device->pwrctrl;
229
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230 snprintf(temp, sizeof(temp), "%.*s",
231 (int)min(count, sizeof(temp) - 1), buf);
232 rc = strict_strtoul(temp, 0, &val);
233 if (rc)
234 return rc;
235
236 if (org_interval_timeout == 1)
237 org_interval_timeout = pwr->interval_timeout;
238
239 mutex_lock(&device->mutex);
240
241 /* Let the timeout be requested in ms, but convert to jiffies. */
242 val /= div;
243 if (val >= org_interval_timeout)
244 pwr->interval_timeout = val;
245
246 mutex_unlock(&device->mutex);
247
248 return count;
249}
250
251static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
252 struct device_attribute *attr,
253 char *buf)
254{
255 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600256 if (device == NULL)
257 return 0;
258 return snprintf(buf, PAGE_SIZE, "%d\n",
259 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260}
261
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700262static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
263 struct device_attribute *attr,
264 char *buf)
265{
266 int ret;
267 struct kgsl_device *device = kgsl_device_from_dev(dev);
268 struct kgsl_busy *b = &device->pwrctrl.busy;
269 ret = snprintf(buf, 17, "%7d %7d\n",
270 b->on_time_old, b->time_old);
271 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
272 b->on_time_old = 0;
273 b->time_old = 0;
274 }
275 return ret;
276}
277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
279DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
280 kgsl_pwrctrl_max_gpuclk_store);
Lucille Sylvester67138c92011-12-07 17:26:29 -0700281DEVICE_ATTR(pwrnap, 0666, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
283 kgsl_pwrctrl_idle_timer_store);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700284DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
285 NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286
287static const struct device_attribute *pwrctrl_attr_list[] = {
288 &dev_attr_gpuclk,
289 &dev_attr_max_gpuclk,
290 &dev_attr_pwrnap,
291 &dev_attr_idle_timer,
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700292 &dev_attr_gpubusy,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700293 NULL
294};
295
296int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
297{
298 return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
299}
300
301void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
302{
303 kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
304}
305
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700306/* Track the amount of time the gpu is on vs the total system time. *
307 * Regularly update the percentage of busy time displayed by sysfs. */
308static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
309{
310 struct kgsl_busy *b = &device->pwrctrl.busy;
311 int elapsed;
312 if (b->start.tv_sec == 0)
313 do_gettimeofday(&(b->start));
314 do_gettimeofday(&(b->stop));
315 elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
316 elapsed += b->stop.tv_usec - b->start.tv_usec;
317 b->time += elapsed;
318 if (on_time)
319 b->on_time += elapsed;
320 /* Update the output regularly and reset the counters. */
321 if ((b->time > UPDATE_BUSY_VAL) ||
322 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
323 b->on_time_old = b->on_time;
324 b->time_old = b->time;
325 b->on_time = 0;
326 b->time = 0;
327 }
328 do_gettimeofday(&(b->start));
329}
330
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
332{
333 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
334 int i = 0;
335 if (state == KGSL_PWRFLAGS_OFF) {
336 if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
337 &pwr->power_flags)) {
338 KGSL_PWR_INFO(device,
339 "clocks off, device %d\n", device->id);
340 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
341 if (pwr->grp_clks[i])
342 clk_disable(pwr->grp_clks[i]);
343 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
344 (device->requested_state != KGSL_STATE_NAP))
345 clk_set_rate(pwr->grp_clks[0],
346 pwr->pwrlevels[pwr->num_pwrlevels - 1].
347 gpu_freq);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700348 kgsl_pwrctrl_busy_time(device, true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349 }
350 } else if (state == KGSL_PWRFLAGS_ON) {
351 if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
352 &pwr->power_flags)) {
353 KGSL_PWR_INFO(device,
354 "clocks on, device %d\n", device->id);
355
356 if ((pwr->pwrlevels[0].gpu_freq > 0) &&
357 (device->state != KGSL_STATE_NAP))
358 clk_set_rate(pwr->grp_clks[0],
359 pwr->pwrlevels[pwr->active_pwrlevel].
360 gpu_freq);
361
362 /* as last step, enable grp_clk
363 this is to let GPU interrupt to come */
364 for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
365 if (pwr->grp_clks[i])
366 clk_enable(pwr->grp_clks[i]);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700367 kgsl_pwrctrl_busy_time(device, false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700368 }
369 }
370}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700371
372void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
373{
374 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
375
376 if (state == KGSL_PWRFLAGS_OFF) {
377 if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
378 &pwr->power_flags)) {
379 KGSL_PWR_INFO(device,
380 "axi off, device %d\n", device->id);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530381 if (pwr->ebi1_clk) {
382 clk_set_rate(pwr->ebi1_clk, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700383 clk_disable(pwr->ebi1_clk);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530384 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700385 if (pwr->pcl)
386 msm_bus_scale_client_update_request(pwr->pcl,
387 0);
388 }
389 } else if (state == KGSL_PWRFLAGS_ON) {
390 if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
391 &pwr->power_flags)) {
392 KGSL_PWR_INFO(device,
393 "axi on, device %d\n", device->id);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530394 if (pwr->ebi1_clk) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395 clk_enable(pwr->ebi1_clk);
Lynus Vaz5a641cc2011-09-15 14:43:40 +0530396 clk_set_rate(pwr->ebi1_clk,
397 pwr->pwrlevels[pwr->active_pwrlevel].
398 bus_freq);
399 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700400 if (pwr->pcl)
401 msm_bus_scale_client_update_request(pwr->pcl,
402 pwr->pwrlevels[pwr->active_pwrlevel].
403 bus_freq);
404 }
405 }
406}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700407
408void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
409{
410 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
411
412 if (state == KGSL_PWRFLAGS_OFF) {
413 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
414 &pwr->power_flags)) {
415 KGSL_PWR_INFO(device,
416 "power off, device %d\n", device->id);
417 if (pwr->gpu_reg)
418 regulator_disable(pwr->gpu_reg);
419 }
420 } else if (state == KGSL_PWRFLAGS_ON) {
421 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
422 &pwr->power_flags)) {
423 KGSL_PWR_INFO(device,
424 "power on, device %d\n", device->id);
425 if (pwr->gpu_reg)
426 regulator_enable(pwr->gpu_reg);
427 }
428 }
429}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700430
431void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
432{
433 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
434
435 if (state == KGSL_PWRFLAGS_ON) {
436 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
437 &pwr->power_flags)) {
438 KGSL_PWR_INFO(device,
439 "irq on, device %d\n", device->id);
440 enable_irq(pwr->interrupt_num);
441 device->ftbl->irqctrl(device, 1);
442 }
443 } else if (state == KGSL_PWRFLAGS_OFF) {
444 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
445 &pwr->power_flags)) {
446 KGSL_PWR_INFO(device,
447 "irq off, device %d\n", device->id);
448 device->ftbl->irqctrl(device, 0);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600449 if (in_interrupt())
450 disable_irq_nosync(pwr->interrupt_num);
451 else
452 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700453 }
454 }
455}
456EXPORT_SYMBOL(kgsl_pwrctrl_irq);
457
458int kgsl_pwrctrl_init(struct kgsl_device *device)
459{
460 int i, result = 0;
461 struct clk *clk;
462 struct platform_device *pdev =
463 container_of(device->parentdev, struct platform_device, dev);
464 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600465 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700466
467 /*acquire clocks */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600468 for (i = 0; i < KGSL_MAX_CLKS; i++) {
469 if (pdata->clk_map & clks[i].map) {
470 clk = clk_get(&pdev->dev, clks[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 if (IS_ERR(clk))
472 goto clk_err;
473 pwr->grp_clks[i] = clk;
474 }
475 }
476 /* Make sure we have a source clk for freq setting */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600477 if (pwr->grp_clks[0] == NULL)
478 pwr->grp_clks[0] = pwr->grp_clks[1];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479
480 /* put the AXI bus into asynchronous mode with the graphics cores */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600481 if (pdata->set_grp_async != NULL)
482 pdata->set_grp_async();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700483
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600484 if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600486 pdata->num_levels);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487 result = -EINVAL;
488 goto done;
489 }
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600490 pwr->num_pwrlevels = pdata->num_levels;
491 pwr->active_pwrlevel = pdata->init_level;
492 for (i = 0; i < pdata->num_levels; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493 pwr->pwrlevels[i].gpu_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600494 (pdata->pwrlevel[i].gpu_freq > 0) ?
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700495 clk_round_rate(pwr->grp_clks[0],
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600496 pdata->pwrlevel[i].
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700497 gpu_freq) : 0;
498 pwr->pwrlevels[i].bus_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600499 pdata->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600500 pwr->pwrlevels[i].io_fraction =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600501 pdata->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700502 }
503 /* Do not set_rate for targets in sync with AXI */
504 if (pwr->pwrlevels[0].gpu_freq > 0)
505 clk_set_rate(pwr->grp_clks[0], pwr->
506 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
507
508 pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
509 if (IS_ERR(pwr->gpu_reg))
510 pwr->gpu_reg = NULL;
511
512 pwr->power_flags = 0;
513
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600514 pwr->nap_allowed = pdata->nap_allowed;
515 pwr->interval_timeout = pdata->idle_timeout;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700516 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700517 if (IS_ERR(pwr->ebi1_clk))
518 pwr->ebi1_clk = NULL;
519 else
520 clk_set_rate(pwr->ebi1_clk,
521 pwr->pwrlevels[pwr->active_pwrlevel].
522 bus_freq);
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600523 if (pdata->bus_scale_table != NULL) {
524 pwr->pcl = msm_bus_scale_register_client(pdata->
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700525 bus_scale_table);
526 if (!pwr->pcl) {
527 KGSL_PWR_ERR(device,
528 "msm_bus_scale_register_client failed: "
529 "id %d table %p", device->id,
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600530 pdata->bus_scale_table);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700531 result = -EINVAL;
532 goto done;
533 }
534 }
535
536 /*acquire interrupt */
537 pwr->interrupt_num =
538 platform_get_irq_byname(pdev, pwr->irq_name);
539
540 if (pwr->interrupt_num <= 0) {
541 KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
542 pwr->interrupt_num);
543 result = -EINVAL;
544 goto done;
545 }
546
547 register_early_suspend(&device->display_off);
548 return result;
549
550clk_err:
551 result = PTR_ERR(clk);
552 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600553 clks[i].name, result);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700554
555done:
556 return result;
557}
558
559void kgsl_pwrctrl_close(struct kgsl_device *device)
560{
561 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
562 int i;
563
564 KGSL_PWR_INFO(device, "close device %d\n", device->id);
565
566 unregister_early_suspend(&device->display_off);
567
568 if (pwr->interrupt_num > 0) {
569 if (pwr->have_irq) {
570 free_irq(pwr->interrupt_num, NULL);
571 pwr->have_irq = 0;
572 }
573 pwr->interrupt_num = 0;
574 }
575
576 clk_put(pwr->ebi1_clk);
577
578 if (pwr->pcl)
579 msm_bus_scale_unregister_client(pwr->pcl);
580
581 pwr->pcl = 0;
582
583 if (pwr->gpu_reg) {
584 regulator_put(pwr->gpu_reg);
585 pwr->gpu_reg = NULL;
586 }
587
588 for (i = 1; i < KGSL_MAX_CLKS; i++)
589 if (pwr->grp_clks[i]) {
590 clk_put(pwr->grp_clks[i]);
591 pwr->grp_clks[i] = NULL;
592 }
593
594 pwr->grp_clks[0] = NULL;
595 pwr->power_flags = 0;
596}
597
598void kgsl_idle_check(struct work_struct *work)
599{
600 struct kgsl_device *device = container_of(work, struct kgsl_device,
601 idle_check_ws);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700602 WARN_ON(device == NULL);
603 if (device == NULL)
604 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700605
606 mutex_lock(&device->mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
Suman Tatiraju24569022011-10-27 11:11:12 -0700608 if ((device->requested_state != KGSL_STATE_SLEEP) &&
609 (device->requested_state != KGSL_STATE_SLUMBER))
Lucille Sylvesterc4af3552011-10-27 11:44:24 -0600610 kgsl_pwrscale_idle(device);
611
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700612 if (kgsl_pwrctrl_sleep(device) != 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700613 mod_timer(&device->idle_timer,
614 jiffies +
615 device->pwrctrl.interval_timeout);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700616 /* If the GPU has been too busy to sleep, make sure *
617 * that is acurately reflected in the % busy numbers. */
618 device->pwrctrl.busy.no_nap_cnt++;
619 if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
620 kgsl_pwrctrl_busy_time(device, true);
621 device->pwrctrl.busy.no_nap_cnt = 0;
622 }
623 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700624 } else if (device->state & (KGSL_STATE_HUNG |
625 KGSL_STATE_DUMP_AND_RECOVER)) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700626 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700627 }
628
629 mutex_unlock(&device->mutex);
630}
631
632void kgsl_timer(unsigned long data)
633{
634 struct kgsl_device *device = (struct kgsl_device *) data;
635
636 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
637 if (device->requested_state != KGSL_STATE_SUSPEND) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700638 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639 /* Have work run in a non-interrupt context. */
640 queue_work(device->work_queue, &device->idle_check_ws);
641 }
642}
643
644void kgsl_pre_hwaccess(struct kgsl_device *device)
645{
646 BUG_ON(!mutex_is_locked(&device->mutex));
Suman Tatiraju24569022011-10-27 11:11:12 -0700647 if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP |
648 KGSL_STATE_SLUMBER))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700649 kgsl_pwrctrl_wake(device);
650}
651EXPORT_SYMBOL(kgsl_pre_hwaccess);
652
653void kgsl_check_suspended(struct kgsl_device *device)
654{
655 if (device->requested_state == KGSL_STATE_SUSPEND ||
656 device->state == KGSL_STATE_SUSPEND) {
657 mutex_unlock(&device->mutex);
658 wait_for_completion(&device->hwaccess_gate);
659 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -0700660 } else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700661 mutex_unlock(&device->mutex);
662 wait_for_completion(&device->recovery_gate);
663 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -0700664 } else if (device->state == KGSL_STATE_SLUMBER)
665 kgsl_pwrctrl_wake(device);
666}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700667
Suman Tatiraju24569022011-10-27 11:11:12 -0700668static int
Jeremy Gebben388c2972011-12-16 09:05:07 -0700669_nap(struct kgsl_device *device)
Suman Tatiraju24569022011-10-27 11:11:12 -0700670{
Suman Tatiraju24569022011-10-27 11:11:12 -0700671 switch (device->state) {
672 case KGSL_STATE_ACTIVE:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700673 if (!device->ftbl->isidle(device)) {
674 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
675 return -EBUSY;
676 }
677 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
678 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
679 kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
680 if (device->idle_wakelock.name)
681 wake_unlock(&device->idle_wakelock);
Suman Tatiraju24569022011-10-27 11:11:12 -0700682 case KGSL_STATE_NAP:
683 case KGSL_STATE_SLEEP:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700684 case KGSL_STATE_SLUMBER:
Suman Tatiraju24569022011-10-27 11:11:12 -0700685 break;
686 default:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700687 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
Suman Tatiraju24569022011-10-27 11:11:12 -0700688 break;
689 }
Jeremy Gebben388c2972011-12-16 09:05:07 -0700690 return 0;
691}
692
693static void
694_sleep_accounting(struct kgsl_device *device)
695{
696 kgsl_pwrctrl_busy_time(device, false);
697 device->pwrctrl.busy.start.tv_sec = 0;
698 device->pwrctrl.time = 0;
699 kgsl_pwrscale_sleep(device);
700}
701
702static int
703_sleep(struct kgsl_device *device)
704{
705 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
706 switch (device->state) {
707 case KGSL_STATE_ACTIVE:
708 if (!device->ftbl->isidle(device)) {
709 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
710 return -EBUSY;
711 }
712 /* fall through */
713 case KGSL_STATE_NAP:
714 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
715 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
716 if (pwr->pwrlevels[0].gpu_freq > 0)
717 clk_set_rate(pwr->grp_clks[0],
718 pwr->pwrlevels[pwr->num_pwrlevels - 1].
719 gpu_freq);
720 _sleep_accounting(device);
721 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
722 kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
723 if (device->idle_wakelock.name)
724 wake_unlock(&device->idle_wakelock);
725 break;
726 case KGSL_STATE_SLEEP:
727 case KGSL_STATE_SLUMBER:
728 break;
729 default:
730 KGSL_PWR_WARN(device, "unhandled state %x\n",
731 device->state);
732 break;
733 }
734 return 0;
735}
736
737static int
738_slumber(struct kgsl_device *device)
739{
740 switch (device->state) {
741 case KGSL_STATE_ACTIVE:
742 if (!device->ftbl->isidle(device)) {
743 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
744 device->pwrctrl.restore_slumber = true;
745 return -EBUSY;
746 }
747 /* fall through */
748 case KGSL_STATE_NAP:
749 case KGSL_STATE_SLEEP:
750 del_timer_sync(&device->idle_timer);
751 kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
752 device->ftbl->suspend_context(device);
753 device->ftbl->stop(device);
754 device->pwrctrl.restore_slumber = true;
755 _sleep_accounting(device);
756 kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
757 if (device->idle_wakelock.name)
758 wake_unlock(&device->idle_wakelock);
759 break;
760 case KGSL_STATE_SLUMBER:
761 break;
762 default:
763 KGSL_PWR_WARN(device, "unhandled state %x\n",
764 device->state);
765 break;
766 }
767 return 0;
Suman Tatiraju24569022011-10-27 11:11:12 -0700768}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700769
770/******************************************************************/
771/* Caller must hold the device mutex. */
772int kgsl_pwrctrl_sleep(struct kgsl_device *device)
773{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700774 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700775 KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
776
777 /* Work through the legal state transitions */
Jeremy Gebben388c2972011-12-16 09:05:07 -0700778 switch (device->requested_state) {
779 case KGSL_STATE_NAP:
Suman Tatiraju24569022011-10-27 11:11:12 -0700780 if (device->pwrctrl.restore_slumber) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700781 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
782 break;
783 }
784 status = _nap(device);
785 break;
786 case KGSL_STATE_SLEEP:
787 if (device->pwrctrl.restore_slumber)
788 status = _slumber(device);
Lucille Sylvester43deede2011-12-15 16:37:25 -0700789 else
Jeremy Gebben388c2972011-12-16 09:05:07 -0700790 status = _sleep(device);
791 break;
792 case KGSL_STATE_SLUMBER:
793 status = _slumber(device);
794 break;
795 default:
796 KGSL_PWR_INFO(device, "bad state request 0x%x\n",
797 device->requested_state);
798 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
799 status = -EINVAL;
800 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700801 }
Suman Tatiraju24569022011-10-27 11:11:12 -0700802 return status;
803}
Jeremy Gebben388c2972011-12-16 09:05:07 -0700804EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700805
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700806/******************************************************************/
807/* Caller must hold the device mutex. */
808void kgsl_pwrctrl_wake(struct kgsl_device *device)
809{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700810 int status;
811 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
812 switch (device->state) {
813 case KGSL_STATE_SLUMBER:
814 status = device->ftbl->start(device, 0);
815 if (status) {
816 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
817 KGSL_DRV_ERR(device, "start failed %d\n", status);
818 break;
819 }
820 /* fall through */
821 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700822 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
823 kgsl_pwrscale_wake(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700824 /* fall through */
825 case KGSL_STATE_NAP:
826 /* Turn on the core clocks */
827 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
828 /* Enable state before turning on irq */
829 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
830 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
831 /* Re-enable HW access */
832 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833 jiffies + device->pwrctrl.interval_timeout);
834
Jeremy Gebben388c2972011-12-16 09:05:07 -0700835 if (device->idle_wakelock.name)
836 wake_lock(&device->idle_wakelock);
837 case KGSL_STATE_ACTIVE:
838 break;
839 default:
840 KGSL_PWR_WARN(device, "unhandled state %x\n",
841 device->state);
842 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
843 break;
844 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845}
846EXPORT_SYMBOL(kgsl_pwrctrl_wake);
847
848void kgsl_pwrctrl_enable(struct kgsl_device *device)
849{
850 /* Order pwrrail/clk sequence based upon platform */
851 kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
852 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
853 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
854}
855EXPORT_SYMBOL(kgsl_pwrctrl_enable);
856
857void kgsl_pwrctrl_disable(struct kgsl_device *device)
858{
859 /* Order pwrrail/clk sequence based upon platform */
860 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
861 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
862 kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
863}
864EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700865
866void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
867{
868 KGSL_PWR_WARN(device, "%x\n", state);
869 device->state = state;
870 device->requested_state = KGSL_STATE_NONE;
871}
872EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
873
874void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
875{
876 if (state != KGSL_STATE_NONE && state != device->requested_state)
877 KGSL_PWR_INFO(device, "%x\n", state);
878 device->requested_state = state;
879}
880EXPORT_SYMBOL(kgsl_pwrctrl_request_state);