blob: 6ab9534ff28169e881c16d555b045e79d0786051 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/interrupt.h>
14#include <mach/msm_iomap.h>
15#include <mach/msm_bus.h>
Lucille Sylvesteref44e7332011-11-02 13:21:17 -070016#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070017
18#include "kgsl.h"
19#include "kgsl_pwrscale.h"
20#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070021#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
/* Bit positions in kgsl_pwrctrl.power_flags tracking which resources are on. */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON   1
#define KGSL_PWRFLAGS_AXI_ON   2
#define KGSL_PWRFLAGS_IRQ_ON   3

/* Publish busy statistics after this many microseconds of accumulation. */
#define UPDATE_BUSY_VAL	1000000
/* Force a busy-stats refresh after this many failed attempts to sleep. */
#define UPDATE_BUSY	50

/* Maps a clock's platform name to its bit in pdata->clk_map. */
struct clk_pair {
	const char *name;
	uint map;
};

/*
 * Table of every clock the GPU may need; kgsl_pwrctrl_init() only
 * acquires the entries whose map bit is set in the platform data.
 * Index 0 (src_clk) is the rate-setting source clock.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
58
/*
 * kgsl_pwrctrl_pwrlevel_change - move the GPU to a new power level.
 * @device: KGSL device
 * @new_level: index into pwr->pwrlevels (lower index = higher frequency)
 *
 * The request is honored only if it is a real change, within range, and
 * not hotter than the current thermal limit (thermal_pwrlevel).  Updates
 * the core clock and the bus vote to match.  Caller must hold device->mutex.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		pwr->active_pwrlevel = new_level;
		/* Only touch the clock when it is running (or napping,
		 * where the rate change takes effect on wakeup). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP))
			clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Prefer the bus-scale client; fall back to a direct
			 * ebi1 clock rate when no client is registered. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				    pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
83
84static int __gpuclk_store(int max, struct device *dev,
85 struct device_attribute *attr,
86 const char *buf, size_t count)
87{ int ret, i, delta = 5000000;
88 unsigned long val;
89 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -060090 struct kgsl_pwrctrl *pwr;
91
92 if (device == NULL)
93 return 0;
94 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
96 ret = sscanf(buf, "%ld", &val);
97 if (ret != 1)
98 return count;
99
100 mutex_lock(&device->mutex);
101 for (i = 0; i < pwr->num_pwrlevels; i++) {
102 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
103 if (max)
104 pwr->thermal_pwrlevel = i;
105 break;
106 }
107 }
108
109 if (i == pwr->num_pwrlevels)
110 goto done;
111
112 /*
113 * If the current or requested clock speed is greater than the
114 * thermal limit, bump down immediately.
115 */
116
117 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
118 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
119 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
120 else if (!max)
121 kgsl_pwrctrl_pwrlevel_change(device, i);
122
123done:
124 mutex_unlock(&device->mutex);
125 return count;
126}
127
128static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
129 struct device_attribute *attr,
130 const char *buf, size_t count)
131{
132 return __gpuclk_store(1, dev, attr, buf, count);
133}
134
135static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
136 struct device_attribute *attr,
137 char *buf)
138{
139 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600140 struct kgsl_pwrctrl *pwr;
141 if (device == NULL)
142 return 0;
143 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 return snprintf(buf, PAGE_SIZE, "%d\n",
145 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
146}
147
148static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
149 struct device_attribute *attr,
150 const char *buf, size_t count)
151{
152 return __gpuclk_store(0, dev, attr, buf, count);
153}
154
155static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
156 struct device_attribute *attr,
157 char *buf)
158{
159 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600160 struct kgsl_pwrctrl *pwr;
161 if (device == NULL)
162 return 0;
163 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700164 return snprintf(buf, PAGE_SIZE, "%d\n",
165 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
166}
167
/*
 * kgsl_pwrctrl_pwrnap_store - sysfs writer for the pwrnap attribute.
 *
 * Accepts "1" to allow the NAP low-power state and "0" to forbid it.
 * Any other numeric value is silently ignored; non-numeric input
 * returns the strict_strtoul error.
 */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy at most sizeof(temp)-1 bytes so the parse buffer is
	 * always NUL-terminated regardless of count. */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
199
200static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
201 struct device_attribute *attr,
202 char *buf)
203{
204 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600205 if (device == NULL)
206 return 0;
207 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208}
209
210
/*
 * kgsl_pwrctrl_idle_timer_store - sysfs writer for idle_timer.
 *
 * Takes an idle timeout in milliseconds and stores it in jiffies.
 * The very first write latches the platform's original timeout
 * (org_interval_timeout, a function-static) as a floor; later writes
 * below that floor are silently ignored.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Bounded copy guarantees NUL termination for the parser. */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	/* 1 is the "not yet latched" sentinel set at compile time. */
	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
247
248static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
249 struct device_attribute *attr,
250 char *buf)
251{
252 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600253 if (device == NULL)
254 return 0;
255 return snprintf(buf, PAGE_SIZE, "%d\n",
256 device->pwrctrl.interval_timeout);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257}
258
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700259static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
260 struct device_attribute *attr,
261 char *buf)
262{
263 int ret;
264 struct kgsl_device *device = kgsl_device_from_dev(dev);
265 struct kgsl_busy *b = &device->pwrctrl.busy;
266 ret = snprintf(buf, 17, "%7d %7d\n",
267 b->on_time_old, b->time_old);
268 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
269 b->on_time_old = 0;
270 b->time_old = 0;
271 }
272 return ret;
273}
274
/* sysfs attribute declarations for the power-control interface. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
/* NOTE(review): 0666 makes pwrnap world-writable — confirm this is intended,
 * as it lets any process toggle the GPU's nap policy. */
DEVICE_ATTR(pwrnap, 0666, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
	NULL);

/* NULL-terminated list consumed by kgsl_pwrctrl_init_sysfs(). */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	NULL
};
292
/* Create the power-control sysfs files under the device node. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
297
/* Remove the sysfs files created by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
302
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_busy *b = &device->pwrctrl.busy;
	int elapsed;
	/* First call after a reset: initialize the window start. */
	if (b->start.tv_sec == 0)
		do_gettimeofday(&(b->start));
	do_gettimeofday(&(b->stop));
	elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
	elapsed += b->stop.tv_usec - b->start.tv_usec;
	b->time += elapsed;
	/* on_time=true means the GPU was powered during this interval. */
	if (on_time)
		b->on_time += elapsed;
	/* Update the output regularly and reset the counters. */
	if ((b->time > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		b->on_time_old = b->on_time;
		b->time_old = b->time;
		b->on_time = 0;
		b->time = 0;
	}
	/* Every call starts a fresh measurement interval. */
	do_gettimeofday(&(b->start));
}
327
/*
 * kgsl_pwrctrl_clk - gate or ungate the GPU group clocks.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * The CLK_ON bit in power_flags makes the operation idempotent: the
 * clocks are touched only on a real OFF->ON or ON->OFF transition.
 * grp_clks[0] is the rate-setting source clock and is never
 * enabled/disabled here, only re-rated.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* Drop to the lowest rate unless we are only
			 * napping (NAP keeps the rate for a fast wake). */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->requested_state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* Restore the active level's rate before enabling,
			 * except when resuming from NAP (rate was kept). */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366
/*
 * kgsl_pwrctrl_axi - raise or drop the GPU's AXI bus vote.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the AXI_ON bit.  Uses the bus-scale client (pcl) when
 * registered, and the raw ebi1 clock when present; a platform may have
 * either, both, or neither.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			/* Zero the rate before gating the bus clock. */
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			/* Enable first, then restore the active level's
			 * bandwidth. */
			if (pwr->ebi1_clk) {
				clk_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700400
401void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
402{
403 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
404
405 if (state == KGSL_PWRFLAGS_OFF) {
406 if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
407 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700408 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409 if (pwr->gpu_reg)
410 regulator_disable(pwr->gpu_reg);
411 }
412 } else if (state == KGSL_PWRFLAGS_ON) {
413 if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
414 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700415 trace_kgsl_rail(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700416 if (pwr->gpu_reg)
417 regulator_enable(pwr->gpu_reg);
418 }
419 }
420}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700421
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Idempotent via the IRQ_ON flag bit.  When disabling from interrupt
 * context, the nosync variant is used because disable_irq() would wait
 * for the running handler (i.e. ourselves) and deadlock.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
444
/*
 * kgsl_pwrctrl_init - acquire all power resources from platform data.
 * @device: KGSL device whose pwrctrl state is being populated
 *
 * Grabs clocks, regulator, bus-scale client, interrupt number and the
 * power-level table described by the platform_data, then registers the
 * early-suspend hook.  Returns 0 on success or a negative errno.
 * On clk_get failure the already-acquired clocks are NOT released here;
 * NOTE(review): presumably kgsl_pwrctrl_close() handles that — confirm.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/* Acquire only the clocks selected by the platform's clk_map. */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
			     pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	/* Snap each requested gpu_freq to a rate the clock can do. */
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
			       pdata->pwrlevel[i].
			       gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
			pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* A missing regulator is not fatal; rail control becomes a no-op. */
	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
			     pwr->pwrlevels[pwr->active_pwrlevel].
				bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				"msm_bus_scale_register_client failed: "
				"id %d table %p", device->id,
				pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* Acquire the interrupt number (requested later by the driver). */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
			     pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
		     clks[i].name, result);

done:
	return result;
}
545
/*
 * kgsl_pwrctrl_close - release everything kgsl_pwrctrl_init() acquired:
 * early-suspend hook, IRQ, bus clock, bus-scale client, regulator and
 * group clocks.  Safe against partially-initialized state (NULL checks
 * throughout).
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	unregister_early_suspend(&device->display_off);

	if (pwr->interrupt_num > 0) {
		if (pwr->have_irq) {
			free_irq(pwr->interrupt_num, NULL);
			pwr->have_irq = 0;
		}
		pwr->interrupt_num = 0;
	}

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	/* Start at 1: slot 0 may alias slot 1 (see init), so it is only
	 * cleared, never clk_put() a second time. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
584
/*
 * kgsl_idle_check - workqueue handler that tries to put an idle GPU
 * into a lower power state.  If the GPU refuses to sleep (still busy),
 * the idle timer is re-armed and the busy statistics are refreshed
 * periodically so sysfs numbers stay accurate.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/* Give pwrscale a chance to adjust the level unless a
		 * sleep/slumber transition is already requested. */
		if ((device->requested_state != KGSL_STATE_SLEEP) &&
			(device->requested_state != KGSL_STATE_SLUMBER))
			kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Recovery owns the device; just drop any pending request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
618
619void kgsl_timer(unsigned long data)
620{
621 struct kgsl_device *device = (struct kgsl_device *) data;
622
623 KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
Jeremy Gebbena87d51a2012-01-09 13:36:06 -0700624 if (device->requested_state == KGSL_STATE_NONE) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700625 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 /* Have work run in a non-interrupt context. */
627 queue_work(device->work_queue, &device->idle_check_ws);
628 }
629}
630
/*
 * kgsl_pre_hwaccess - make sure the hardware is awake before a register
 * access.  Wakes the GPU from NAP/SLEEP/SLUMBER, blocks through a
 * suspend, and merely warns when clocks are off in INIT/HUNG/recovery
 * states (where the caller is expected to know what it is doing).
 * Caller must hold device->mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* Clocks already on: access is safe; otherwise log it. */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
			     &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
				"hw access while clocks off from state %d\n",
				device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
			     device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
663
/*
 * kgsl_check_suspended - block the caller until the device leaves
 * suspend or dump-and-recover, or wake it from slumber.  The device
 * mutex is dropped while waiting so the suspend/recovery path can
 * make progress.  Caller must hold device->mutex.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
				device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700678
/*
 * _nap - transition ACTIVE -> NAP (clocks and IRQ off, rail kept on).
 * Returns -EBUSY if the GPU is not idle; 0 otherwise (including when
 * already in NAP/SLEEP/SLUMBER, which are treated as "at least as
 * deep" and left alone).
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		/* fall through - NAP is now the current state */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
703
704static void
705_sleep_accounting(struct kgsl_device *device)
706{
707 kgsl_pwrctrl_busy_time(device, false);
708 device->pwrctrl.busy.start.tv_sec = 0;
709 device->pwrctrl.time = 0;
710 kgsl_pwrscale_sleep(device);
711}
712
/*
 * _sleep - transition ACTIVE/NAP -> SLEEP: IRQ, bus and clocks off,
 * core clock parked at the lowest rate.  Returns -EBUSY if the GPU is
 * still busy; 0 otherwise.  SLEEP/SLUMBER are already deep enough and
 * are left alone.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		/* Park the source clock at the slowest configured rate. */
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
747
/*
 * _slumber - deepest low-power transition: stop the GPU entirely
 * (suspend context, stop core) after dropping to the nominal level.
 * Returns -EBUSY if the GPU is still busy (restore_slumber is set so
 * the next sleep retries slumber); 0 otherwise.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* The idle timer is pointless while fully stopped. */
		del_timer_sync(&device->idle_timer);
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->pwrctrl.restore_slumber = true;
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700780
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_sleep - dispatch the requested low-power transition
 * (NAP, SLEEP or SLUMBER).  restore_slumber upgrades NAP to a no-op
 * and SLEEP to SLUMBER so the GPU returns to its pre-suspend depth.
 * Returns 0 on success, -EBUSY if the GPU is busy, -EINVAL on a bad
 * request.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	int status = 0;
	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	switch (device->requested_state) {
	case KGSL_STATE_NAP:
		/* While slumber is pending, napping would keep power up
		 * too long; clear the request instead. */
		if (device->pwrctrl.restore_slumber) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			break;
		}
		status = _nap(device);
		break;
	case KGSL_STATE_SLEEP:
		if (device->pwrctrl.restore_slumber)
			status = _slumber(device);
		else
			status = _sleep(device);
		break;
	case KGSL_STATE_SLUMBER:
		status = _slumber(device);
		break;
	default:
		KGSL_PWR_INFO(device, "bad state request 0x%x\n",
			      device->requested_state);
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		status = -EINVAL;
		break;
	}
	return status;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700816
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake - bring the GPU back to ACTIVE from any low-power
 * state.  Each deeper state falls through the shallower wake-up steps:
 * SLUMBER restarts the core, SLEEP restores the bus, NAP re-enables
 * clocks/IRQ and re-arms the idle timer.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
				jiffies + device->pwrctrl.interval_timeout);

		if (device->idle_wakelock.name)
			wake_lock(&device->idle_wakelock);
		/* fall through - already awake, nothing more to do */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
858
/*
 * kgsl_pwrctrl_enable - full power-up: rail, then clocks, then bus.
 * The ordering is deliberate and must mirror kgsl_pwrctrl_disable().
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
867
/*
 * kgsl_pwrctrl_disable - full power-down in the reverse order of
 * kgsl_pwrctrl_enable(): bus, then clocks, then the rail.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700876
/*
 * kgsl_pwrctrl_stop_work - quiesce the idle machinery: kill the idle
 * timer, mask the IRQ and drain the workqueue.  The device mutex is
 * dropped around flush_workqueue() because queued work (kgsl_idle_check)
 * takes the same mutex and would deadlock otherwise.
 * Caller must hold device->mutex.
 */
void kgsl_pwrctrl_stop_work(struct kgsl_device *device)
{
	del_timer_sync(&device->idle_timer);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	mutex_unlock(&device->mutex);
	flush_workqueue(device->work_queue);
	mutex_lock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_pwrctrl_stop_work);
886
/*
 * kgsl_pwrctrl_set_state - commit a completed state transition: record
 * the new state, emit the trace event, and clear any pending request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
894
895void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
896{
897 if (state != KGSL_STATE_NONE && state != device->requested_state)
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700898 trace_kgsl_pwr_request_state(device, state);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700899 device->requested_state = state;
900}
901EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700902
903const char *kgsl_pwrstate_to_str(unsigned int state)
904{
905 switch (state) {
906 case KGSL_STATE_NONE:
907 return "NONE";
908 case KGSL_STATE_INIT:
909 return "INIT";
910 case KGSL_STATE_ACTIVE:
911 return "ACTIVE";
912 case KGSL_STATE_NAP:
913 return "NAP";
914 case KGSL_STATE_SLEEP:
915 return "SLEEP";
916 case KGSL_STATE_SUSPEND:
917 return "SUSPEND";
918 case KGSL_STATE_HUNG:
919 return "HUNG";
920 case KGSL_STATE_DUMP_AND_RECOVER:
921 return "DNR";
922 case KGSL_STATE_SLUMBER:
923 return "SLUMBER";
924 default:
925 break;
926 }
927 return "UNKNOWN";
928}
929EXPORT_SYMBOL(kgsl_pwrstate_to_str);
930