blob: b309cb2966cbe57c7f439123b38d08c09923f9d0 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
Lucille Sylvesteref44e7332011-11-02 13:21:17 -070016#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070017
18#include "kgsl.h"
19#include "kgsl_pwrscale.h"
20#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070021#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060023#define KGSL_PWRFLAGS_POWER_ON 0
24#define KGSL_PWRFLAGS_CLK_ON 1
25#define KGSL_PWRFLAGS_AXI_ON 2
26#define KGSL_PWRFLAGS_IRQ_ON 3
27
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070028#define UPDATE_BUSY_VAL 1000000
29#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
/*
 * Table mapping platform-data clock-map bits (KGSL_CLK_*) to the clock
 * names passed to clk_get() in kgsl_pwrctrl_init().
 */
struct clk_pair {
	const char *name;	/* name handed to clk_get() */
	uint map;		/* matching bit in pdata->clk_map */
};

struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
58
/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level.
 * @device: KGSL device
 * @new_level: index into pwr->pwrlevels
 *
 * The change is applied only when @new_level is inside the valid range,
 * not faster than the current thermal limit (lower index = faster), and
 * different from the active level.  Updates the core clock rate (when
 * the clock is on, or when napping so the rate is correct at wakeup)
 * and the bus bandwidth vote.  Caller must hold the device mutex.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		pwr->active_pwrlevel = new_level;
		/* Only touch the hardware clock if it is running (or the
		 * device is napping and will resume at this rate). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; fall back to setting
			 * the EBI clock rate directly. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
83
84static int __gpuclk_store(int max, struct device *dev,
85 struct device_attribute *attr,
86 const char *buf, size_t count)
87{ int ret, i, delta = 5000000;
88 unsigned long val;
89 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060090 struct kgsl_pwrctrl *pwr;
91
92 if (device == NULL)
93 return 0;
94 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
96 ret = sscanf(buf, "%ld", &val);
97 if (ret != 1)
98 return count;
99
100 mutex_lock(&device->mutex);
101 for (i = 0; i < pwr->num_pwrlevels; i++) {
102 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
103 if (max)
104 pwr->thermal_pwrlevel = i;
105 break;
106 }
107 }
108
109 if (i == pwr->num_pwrlevels)
110 goto done;
111
112 /*
113 * If the current or requested clock speed is greater than the
114 * thermal limit, bump down immediately.
115 */
116
117 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
118 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
119 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
120 else if (!max)
121 kgsl_pwrctrl_pwrlevel_change(device, i);
122
123done:
124 mutex_unlock(&device->mutex);
125 return count;
126}
127
/* sysfs store for max_gpuclk: set the thermal frequency ceiling. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
134
/* sysfs show for max_gpuclk: print the thermal-limit frequency in Hz. */
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
147
/* sysfs store for gpuclk: request an immediate power-level change. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
154
/* sysfs show for gpuclk: print the active power level's frequency in Hz. */
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
167
/*
 * sysfs store for pwrnap: enable (1) or disable (0) GPU napping.
 * Any value other than 0 or 1 is silently ignored.
 */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy at most sizeof(temp)-1 bytes so parsing is bounded and
	 * NUL-terminated. */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
199
200static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
201 struct device_attribute *attr,
202 char *buf)
203{
204 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600205 if (device == NULL)
206 return 0;
207 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208}
209
210
/*
 * sysfs store for idle_timer: set the idle timeout, given in ms and
 * stored in jiffies.  Values below the original boot-time timeout are
 * ignored (the timeout can only be raised, never lowered below the
 * platform default).
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* NOTE(review): integer division — div is 0 if HZ > 1000, which
	 * would make "val /= div" below fault.  HZ <= 1000 on the
	 * supported targets; confirm before reuse. */
	const long div = 1000/HZ;
	/* Captures the platform's initial timeout on first write only. */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded, NUL-terminated copy of the user input. */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
247
248static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
249 struct device_attribute *attr,
250 char *buf)
251{
252 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600253 if (device == NULL)
254 return 0;
255 return snprintf(buf, PAGE_SIZE, "%d\n",
256 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257}
258
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700259static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
260 struct device_attribute *attr,
261 char *buf)
262{
263 int ret;
264 struct kgsl_device *device = kgsl_device_from_dev(dev);
265 struct kgsl_busy *b = &device->pwrctrl.busy;
266 ret = snprintf(buf, 17, "%7d %7d\n",
267 b->on_time_old, b->time_old);
268 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
269 b->on_time_old = 0;
270 b->time_old = 0;
271 }
272 return ret;
273}
274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
276DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
277 kgsl_pwrctrl_max_gpuclk_store);
Lucille Sylvester67138c92011-12-07 17:26:29 -0700278DEVICE_ATTR(pwrnap, 0666, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700279DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
280 kgsl_pwrctrl_idle_timer_store);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700281DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
282 NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283
/* NULL-terminated list of the attributes registered in sysfs init. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
292
/* Create the power-control sysfs files; returns 0 or a negative errno. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
297
/* Remove the sysfs files created by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
302
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* First call ever: seed the measurement window start. */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	/* Microseconds elapsed in the current window. */
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	do_gettimeofday(&(b->start));
}
327
/*
 * kgsl_pwrctrl_clk - gate or ungate the GPU group clocks.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * The CLK_ON bit in power_flags keeps enable/disable calls balanced.
 * grp_clks[0] is the rate-setting source clock: on the way down it is
 * dropped to the slowest (last) level unless the device is headed to
 * NAP (which keeps the rate for a fast wakeup); on the way up it is
 * restored to the active level before the clocks are enabled.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* Disable in reverse order; index 0 is the source
			 * clock and is never enabled/disabled here. */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365
/*
 * kgsl_pwrctrl_axi - turn the AXI bus clock / bandwidth vote on or off.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * The AXI_ON bit keeps the transitions balanced.  Both the EBI clock
 * (rate set to 0 when off, to the active level's bus_freq when on) and
 * the msm_bus scale client vote are updated when present.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399
/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power-rail regulator on or off.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * No-op when no regulator was acquired at init time.
 * NOTE(review): regulator_enable()'s return value is ignored, so a
 * failed rail power-up goes undetected — confirm intended.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700420
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line.
 * @device: KGSL device
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * The IRQ_ON bit keeps enable/disable balanced.  When turning the irq
 * off from interrupt context, disable_irq_nosync() is used because
 * disable_irq() waits for running handlers and would deadlock.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
443
444int kgsl_pwrctrl_init(struct kgsl_device *device)
445{
446 int i, result = 0;
447 struct clk *clk;
448 struct platform_device *pdev =
449 container_of(device->parentdev, struct platform_device, dev);
450 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600451 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700452
453 /*acquire clocks */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600454 for (i = 0; i < KGSL_MAX_CLKS; i++) {
455 if (pdata->clk_map & clks[i].map) {
456 clk = clk_get(&pdev->dev, clks[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457 if (IS_ERR(clk))
458 goto clk_err;
459 pwr->grp_clks[i] = clk;
460 }
461 }
462 /* Make sure we have a source clk for freq setting */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600463 if (pwr->grp_clks[0] == NULL)
464 pwr->grp_clks[0] = pwr->grp_clks[1];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465
466 /* put the AXI bus into asynchronous mode with the graphics cores */
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600467 if (pdata->set_grp_async != NULL)
468 pdata->set_grp_async();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700469
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600470 if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 KGSL_PWR_ERR(device, "invalid power level count: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600472 pdata->num_levels);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700473 result = -EINVAL;
474 goto done;
475 }
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600476 pwr->num_pwrlevels = pdata->num_levels;
477 pwr->active_pwrlevel = pdata->init_level;
478 for (i = 0; i < pdata->num_levels; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479 pwr->pwrlevels[i].gpu_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600480 (pdata->pwrlevel[i].gpu_freq > 0) ?
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700481 clk_round_rate(pwr->grp_clks[0],
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600482 pdata->pwrlevel[i].
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700483 gpu_freq) : 0;
484 pwr->pwrlevels[i].bus_freq =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600485 pdata->pwrlevel[i].bus_freq;
Lucille Sylvester596d4c22011-10-19 18:04:01 -0600486 pwr->pwrlevels[i].io_fraction =
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600487 pdata->pwrlevel[i].io_fraction;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700488 }
489 /* Do not set_rate for targets in sync with AXI */
490 if (pwr->pwrlevels[0].gpu_freq > 0)
491 clk_set_rate(pwr->grp_clks[0], pwr->
492 pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
493
494 pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
495 if (IS_ERR(pwr->gpu_reg))
496 pwr->gpu_reg = NULL;
497
498 pwr->power_flags = 0;
499
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600500 pwr->nap_allowed = pdata->nap_allowed;
501 pwr->interval_timeout = pdata->idle_timeout;
Matt Wagantall9dc01632011-08-17 18:55:04 -0700502 pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700503 if (IS_ERR(pwr->ebi1_clk))
504 pwr->ebi1_clk = NULL;
505 else
506 clk_set_rate(pwr->ebi1_clk,
507 pwr->pwrlevels[pwr->active_pwrlevel].
508 bus_freq);
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600509 if (pdata->bus_scale_table != NULL) {
510 pwr->pcl = msm_bus_scale_register_client(pdata->
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700511 bus_scale_table);
512 if (!pwr->pcl) {
513 KGSL_PWR_ERR(device,
514 "msm_bus_scale_register_client failed: "
515 "id %d table %p", device->id,
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600516 pdata->bus_scale_table);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700517 result = -EINVAL;
518 goto done;
519 }
520 }
521
522 /*acquire interrupt */
523 pwr->interrupt_num =
524 platform_get_irq_byname(pdev, pwr->irq_name);
525
526 if (pwr->interrupt_num <= 0) {
527 KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
528 pwr->interrupt_num);
529 result = -EINVAL;
530 goto done;
531 }
532
533 register_early_suspend(&device->display_off);
534 return result;
535
536clk_err:
537 result = PTR_ERR(clk);
538 KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
Lucille Sylvesterdce84cd2011-10-12 14:15:37 -0600539 clks[i].name, result);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700540
541done:
542 return result;
543}
544
/*
 * kgsl_pwrctrl_close - release everything acquired by
 * kgsl_pwrctrl_init(): irq, bus client, regulator and clocks.
 * @device: KGSL device being torn down
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* Start at 1: grp_clks[0] may alias grp_clks[1] (see init), so
	 * it is only cleared, never clk_put() twice. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
583
/*
 * kgsl_idle_check - workqueue handler run when the idle timer fires
 * (or a state change is requested).  Attempts to put the device to
 * sleep; if the GPU is still busy, re-arms the idle timer and tracks
 * how often sleep was refused so the busy statistics stay accurate.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/* Give pwrscale a chance to adjust the level unless we are
		 * already committed to a sleep/slumber transition. */
		if ((device->requested_state != KGSL_STATE_SLEEP) &&
		    (device->requested_state != KGSL_STATE_SLUMBER))
			kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
617
618void kgsl_timer(unsigned long data)
619{
620 struct kgsl_device *device = (struct kgsl_device *) data;
621
622 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
623 if (device->requested_state != KGSL_STATE_SUSPEND) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700624 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625 /* Have work run in a non-interrupt context. */
626 queue_work(device->work_queue, &device->idle_check_ws);
627 }
628}
629
/*
 * kgsl_pre_hwaccess - wake the device before touching GPU registers.
 * Caller must hold the device mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP |
				KGSL_STATE_SLUMBER))
		kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
638
/*
 * kgsl_check_suspended - block until the device leaves suspend or
 * recovery.  Drops and re-takes the device mutex around the waits so
 * the suspend/recovery paths can make progress.  Wakes the device if
 * it is slumbering.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653
/*
 * _nap - transition ACTIVE -> NAP (irq and clocks off, rail and AXI
 * kept on for fast wakeup).  Returns -EBUSY if the GPU is not idle;
 * 0 otherwise (including when already napping or deeper).
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		/* fall through - already at or below NAP, nothing to do */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
678
/*
 * _sleep_accounting - close out the busy-time window and reset the
 * counters before the device goes to sleep/slumber; also informs
 * pwrscale so its statistics stop accumulating.
 */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
687
/*
 * _sleep - transition ACTIVE/NAP -> SLEEP (irq, AXI and clocks off,
 * core clock parked at the slowest level).  Returns -EBUSY when the
 * GPU is still busy; 0 otherwise (including when already asleep).
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		/* Park the core clock at the minimum rate while asleep. */
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
722
/*
 * _slumber - deepest low-power transition: stop the GPU entirely
 * (suspend_context + stop) after dropping to the nominal level.
 * Returns -EBUSY when the GPU is still busy; 0 otherwise.  Sets
 * restore_slumber so later sleep requests go back to slumber.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->pwrctrl.restore_slumber = true;
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700755
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - dispatch the pending requested_state to the
 * matching transition helper (_nap/_sleep/_slumber).  A NAP request is
 * dropped when restore_slumber is set (the device must go all the way
 * down), and a SLEEP request is promoted to slumber in that case.
 * Returns 0 on success, -EBUSY if the GPU refused, -EINVAL for an
 * unexpected request.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		if (device->pwrctrl.restore_slumber) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			break;
		}
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		if (device->pwrctrl.restore_slumber)
			status = _slumber(device);
		else
			status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700791
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake - bring the device back to ACTIVE from any sleep
 * state.  The case labels deliberately cascade: SLUMBER restarts the
 * core then falls into SLEEP (AXI + pwrscale), which falls into NAP
 * (clocks, state, irq, idle timer).
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		/* Full restart is required after slumber stopped the core. */
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
				jiffies + device->pwrctrl.interval_timeout);

		if (device->idle_wakelock.name)
			wake_lock(&device->idle_wakelock);
		/* fall through - already active, nothing more to do */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
833
/* Power everything up: rail first, then clocks, then the AXI bus. */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
842
/* Power everything down in reverse order: AXI, clocks, then the rail. */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700851
/* Commit a state transition: record it, trace it, clear the request. */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
859
/* Record a desired state; only new, non-NONE requests are traced. */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700867
868const char *kgsl_pwrstate_to_str(unsigned int state)
869{
870 switch (state) {
871 case KGSL_STATE_NONE:
872 return "NONE";
873 case KGSL_STATE_INIT:
874 return "INIT";
875 case KGSL_STATE_ACTIVE:
876 return "ACTIVE";
877 case KGSL_STATE_NAP:
878 return "NAP";
879 case KGSL_STATE_SLEEP:
880 return "SLEEP";
881 case KGSL_STATE_SUSPEND:
882 return "SUSPEND";
883 case KGSL_STATE_HUNG:
884 return "HUNG";
885 case KGSL_STATE_DUMP_AND_RECOVER:
886 return "DNR";
887 case KGSL_STATE_SLUMBER:
888 return "SLUMBER";
889 default:
890 break;
891 }
892 return "UNKNOWN";
893}
894EXPORT_SYMBOL(kgsl_pwrstate_to_str);
895