blob: 1ef71a458a4a110b71242ae837ebad77e6c8b12a [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
Lucille Sylvesteref44e7332011-11-02 13:21:17 -070016#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070017
18#include "kgsl.h"
19#include "kgsl_pwrscale.h"
20#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070021#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
/* Bit positions in kgsl_pwrctrl.power_flags tracking which resources are on */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3

/* Busy-time accounting window length (us) before the counters are latched */
#define UPDATE_BUSY_VAL 1000000
/* Force a busy-stats refresh after this many failed sleep attempts */
#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
/* Associates a clock's platform name with its KGSL_CLK_* selection bit */
struct clk_pair {
	const char *name;
	uint map;
};

/*
 * Clock table walked by kgsl_pwrctrl_init(); a clock is acquired with
 * clk_get() only when its .map bit is set in pdata->clk_map.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
58
/*
 * Switch the GPU to a new power level.  The request is honored only if
 * new_level is a valid index below the lowest level, does not exceed the
 * thermal ceiling, and actually differs from the current level.  Both the
 * graphics core clock and the bus (AXI) bandwidth vote are retuned.
 * Caller must hold device->mutex.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		pwr->active_pwrlevel = new_level;
		/* Only touch the core clock when it is running (or napping) */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale vote; fall back to ebi1_clk */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				    pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
83
84static int __gpuclk_store(int max, struct device *dev,
85 struct device_attribute *attr,
86 const char *buf, size_t count)
87{ int ret, i, delta = 5000000;
88 unsigned long val;
89 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060090 struct kgsl_pwrctrl *pwr;
91
92 if (device == NULL)
93 return 0;
94 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
96 ret = sscanf(buf, "%ld", &val);
97 if (ret != 1)
98 return count;
99
100 mutex_lock(&device->mutex);
101 for (i = 0; i < pwr->num_pwrlevels; i++) {
102 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
103 if (max)
104 pwr->thermal_pwrlevel = i;
105 break;
106 }
107 }
108
109 if (i == pwr->num_pwrlevels)
110 goto done;
111
112 /*
113 * If the current or requested clock speed is greater than the
114 * thermal limit, bump down immediately.
115 */
116
117 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
118 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
119 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
120 else if (!max)
121 kgsl_pwrctrl_pwrlevel_change(device, i);
122
123done:
124 mutex_unlock(&device->mutex);
125 return count;
126}
127
/* sysfs: write max_gpuclk -- set the thermal ceiling frequency (Hz) */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
134
135static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
136 struct device_attribute *attr,
137 char *buf)
138{
139 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600140 struct kgsl_pwrctrl *pwr;
141 if (device == NULL)
142 return 0;
143 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 return snprintf(buf, PAGE_SIZE, "%d\n",
145 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
146}
147
/* sysfs: write gpuclk -- request an immediate power-level change (Hz) */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
154
155static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
156 struct device_attribute *attr,
157 char *buf)
158{
159 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600160 struct kgsl_pwrctrl *pwr;
161 if (device == NULL)
162 return 0;
163 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700164 return snprintf(buf, PAGE_SIZE, "%d\n",
165 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
166}
167
/*
 * sysfs: write pwrnap -- enable (1) or disable (0) the NAP idle state.
 * Any value other than 0 or 1 is silently ignored.
 */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy into a bounded, NUL-terminated buffer before parsing */
	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
199
200static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
201 struct device_attribute *attr,
202 char *buf)
203{
204 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600205 if (device == NULL)
206 return 0;
207 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208}
209
210
/*
 * sysfs: write idle_timer -- set the idle timeout in milliseconds
 * (stored internally in jiffies).  Values below the original boot-time
 * timeout are rejected silently.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* NOTE(review): 1000/HZ is 0 when HZ > 1000, making the later
	 * division a div-by-zero -- confirm HZ <= 1000 on all targets. */
	const long div = 1000/HZ;
	/* Latches the platform's original timeout on first use; the
	 * sentinel 1 means "not captured yet". */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy into a bounded, NUL-terminated buffer before parsing */
	snprintf(temp, sizeof(temp), "%.*s",
		(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
247
248static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
249 struct device_attribute *attr,
250 char *buf)
251{
252 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600253 if (device == NULL)
254 return 0;
255 return snprintf(buf, PAGE_SIZE, "%d\n",
256 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257}
258
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700259static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
260 struct device_attribute *attr,
261 char *buf)
262{
263 int ret;
264 struct kgsl_device *device = kgsl_device_from_dev(dev);
265 struct kgsl_busy *b = &device->pwrctrl.busy;
266 ret = snprintf(buf, 17, "%7d %7d\n",
267 b->on_time_old, b->time_old);
268 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
269 b->on_time_old = 0;
270 b->time_old = 0;
271 }
272 return ret;
273}
274
/* sysfs attribute descriptors exposed on the kgsl device node */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
/* NOTE(review): 0666 makes pwrnap world-writable -- confirm intentional */
DEVICE_ATTR(pwrnap, 0666, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

/* NULL-terminated list consumed by kgsl_create_device_sysfs_files() */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
292
/* Create the power-control sysfs files for @device; returns 0 or -errno */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
297
/* Remove the sysfs files created by kgsl_pwrctrl_init_sysfs() */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
302
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* A zeroed start timestamp means accounting was reset; restart it */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	/* Microseconds elapsed since the last sample */
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		/* Latch the window into the *_old fields read by sysfs */
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	do_gettimeofday(&(b->start));
}
327
/*
 * Gate the GPU group clocks on or off.  power_flags bit CLK_ON makes the
 * operation idempotent.  Clock order matters: on the ON path the rate is
 * programmed before any clock is enabled, and the core (grp) clocks are
 * enabled last so GPU interrupts can only arrive once everything is up.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* Disable in reverse order; index 0 is rate-only */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* Drop to the lowest rate unless merely napping */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* Restore the active rate unless resuming from NAP */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366
/*
 * Turn the AXI bus vote on or off, via the bus-scale client when one is
 * registered and/or the ebi1 clock otherwise.  Guarded by the AXI_ON
 * power_flags bit so repeated calls are no-ops.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			/* Zero the rate before disabling the bus clock */
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700400
/*
 * Switch the GPU power rail regulator on or off, tracked by the
 * POWER_ON bit in power_flags.  A missing regulator (gpu_reg == NULL)
 * is tolerated: only the flag and trace event change.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			/* NOTE(review): regulator_enable() can fail; its
			 * return value is ignored here -- confirm intended. */
			if (pwr->gpu_reg)
				regulator_enable(pwr->gpu_reg);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700421
/*
 * Enable or disable the GPU interrupt line, tracked by the IRQ_ON bit.
 * When disabling from interrupt context the _nosync variant is used to
 * avoid a deadlock waiting on the currently running handler.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
444
/*
 * One-time power-control setup from platform data: acquire clocks per
 * pdata->clk_map, build the power-level table (rounding each frequency
 * through the source clock), get the optional regulator, bus-scale
 * client, ebi1 clock and IRQ number, and register the early-suspend
 * handler.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths return without releasing clocks/regulator
 * already acquired -- confirm kgsl_pwrctrl_close() is always called by
 * the probe error path to clean up.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	for (i = 0; i < pdata->num_levels; i++) {
		/* Snap each requested rate to what the clock can deliver */
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
		 pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
		 pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* The regulator is optional; NULL means no rail control */
	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/*acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
					 pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
545
/*
 * Release everything acquired by kgsl_pwrctrl_init(): early-suspend
 * handler, IRQ, bus clock, bus-scale client, regulator and group clocks
 * (index 0 may alias index 1, so it is only NULLed, never clk_put).
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* grp_clks[0] may be a copy of grp_clks[1]; put only indices >= 1 */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
584
/*
 * Workqueue handler run when the idle timer fires: consult the power
 * scaler and try to drop into the requested low-power state.  If the
 * GPU is still busy, re-arm the idle timer and bump the no-nap counter
 * so the busy statistics stay accurate.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/* Give the pwrscale policy a chance unless a deep sleep
		 * has already been requested */
		if ((device->requested_state != KGSL_STATE_SLEEP) &&
			(device->requested_state != KGSL_STATE_SLUMBER))
			kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: try again after another interval */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
618
/*
 * Idle timer callback (interrupt context).  Requests SLEEP or SLUMBER
 * depending on restore_slumber, then defers the actual transition to
 * kgsl_idle_check() on the device workqueue.
 */
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.restore_slumber)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}
633
/*
 * Ensure the hardware is awake before register access.  Wakes the GPU
 * from NAP/SLEEP/SLUMBER, blocks through suspend, and logs an error for
 * states where the clocks may legitimately be off.  Caller must hold
 * device->mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* OK only if the clocks happen to be running already */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
666
/*
 * Block the caller while the device is suspending or recovering,
 * dropping the device mutex across the wait.  A device found in SLUMBER
 * is woken instead.  Caller must hold device->mutex.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700681
/*
 * Transition ACTIVE -> NAP: requires the GPU to be idle, then gates the
 * IRQ and clocks (power rail and bus stay up) and releases the idle
 * wakelock.  Returns -EBUSY if the GPU is still busy; already-napping
 * or deeper states are accepted as success.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
706
/* Finalize busy accounting and notify pwrscale before entering sleep */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	/* Zero tv_sec so the next busy_time call restarts the window */
	device->pwrctrl.busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
715
/*
 * Transition ACTIVE/NAP -> SLEEP: requires idle when ACTIVE, then gates
 * IRQ and AXI, drops the core clock to the lowest rate, closes out the
 * busy accounting, gates the clocks and releases the idle wakelock.
 * Returns -EBUSY if still busy; SLEEP/SLUMBER are accepted as success.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
750
/*
 * Transition ACTIVE/NAP/SLEEP -> SLUMBER (deepest non-suspend state):
 * stops the idle timer, drops to the nominal power level, suspends the
 * context and stops the core entirely.  restore_slumber is set so later
 * idle timeouts return here instead of plain SLEEP.  Returns -EBUSY if
 * the GPU is still busy.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->pwrctrl.restore_slumber = true;
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700783
/******************************************************************/
/* Caller must hold the device mutex. */
/* Dispatch to the handler for the currently requested low-power state
 * (NAP, SLEEP or SLUMBER).  Returns 0 on success, -EBUSY if the GPU is
 * still busy, or -EINVAL for an unexpected requested state. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
				device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700812
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700813/******************************************************************/
814/* Caller must hold the device mutex. */
815void kgsl_pwrctrl_wake(struct kgsl_device *device)
816{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700817 int status;
818 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
819 switch (device->state) {
820 case KGSL_STATE_SLUMBER:
821 status = device->ftbl->start(device, 0);
822 if (status) {
823 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
824 KGSL_DRV_ERR(device, "start failed %d\n", status);
825 break;
826 }
827 /* fall through */
828 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
830 kgsl_pwrscale_wake(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700831 /* fall through */
832 case KGSL_STATE_NAP:
833 /* Turn on the core clocks */
834 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
835 /* Enable state before turning on irq */
836 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
837 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
838 /* Re-enable HW access */
839 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700840 jiffies + device->pwrctrl.interval_timeout);
841
Jeremy Gebben388c2972011-12-16 09:05:07 -0700842 if (device->idle_wakelock.name)
843 wake_lock(&device->idle_wakelock);
844 case KGSL_STATE_ACTIVE:
845 break;
846 default:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700847 KGSL_PWR_WARN(device, "unhandled state %s\n",
848 kgsl_pwrstate_to_str(device->state));
Jeremy Gebben388c2972011-12-16 09:05:07 -0700849 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
850 break;
851 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700852}
853EXPORT_SYMBOL(kgsl_pwrctrl_wake);
854
/* Power up everything: rail first, then clocks, then the bus vote */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
863
/* Power down everything, in the reverse order of kgsl_pwrctrl_enable() */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700872
/*
 * Quiesce deferred power work: stop the idle timer, mask the IRQ and
 * drain the device workqueue.  The device mutex is dropped around the
 * flush to let queued work (which takes the mutex) complete.
 */
void kgsl_pwrctrl_stop_work(struct kgsl_device *device)
{
	del_timer_sync(&device->idle_timer);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	mutex_unlock(&device->mutex);
	flush_workqueue(device->work_queue);
	mutex_lock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_pwrctrl_stop_work);
882
/* Commit a state transition and clear any pending request */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
890
/* Record a desired state; traced only when it is a new, real request */
void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
{
	if (state != KGSL_STATE_NONE && state != device->requested_state)
		trace_kgsl_pwr_request_state(device, state);
	device->requested_state = state;
}
EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700898
899const char *kgsl_pwrstate_to_str(unsigned int state)
900{
901 switch (state) {
902 case KGSL_STATE_NONE:
903 return "NONE";
904 case KGSL_STATE_INIT:
905 return "INIT";
906 case KGSL_STATE_ACTIVE:
907 return "ACTIVE";
908 case KGSL_STATE_NAP:
909 return "NAP";
910 case KGSL_STATE_SLEEP:
911 return "SLEEP";
912 case KGSL_STATE_SUSPEND:
913 return "SUSPEND";
914 case KGSL_STATE_HUNG:
915 return "HUNG";
916 case KGSL_STATE_DUMP_AND_RECOVER:
917 return "DNR";
918 case KGSL_STATE_SLUMBER:
919 return "SLUMBER";
920 default:
921 break;
922 }
923 return "UNKNOWN";
924}
925EXPORT_SYMBOL(kgsl_pwrstate_to_str);
926