blob: 9d89b47ae573784ff17fe120e50a35d7d221af57 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
/* Bit positions in kgsl_pwrctrl.power_flags for each power resource. */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3

/* Target SWFI wakeup latency in microseconds (PM QoS hint). */
#define GPU_SWFI_LATENCY 3
/* Busy stats are published once this many microseconds have elapsed. */
#define UPDATE_BUSY_VAL 1000000
/* Force a busy-stats refresh after this many failed sleep attempts. */
#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
/* Maps a clock's devicetree/board name to its KGSL_CLK_* bitmask bit. */
struct clk_pair {
	const char *name;
	uint map;
};

/*
 * Table of all clocks the GPU may need; kgsl_pwrctrl_init() only
 * acquires the entries whose .map bit is set in pdata->clk_map.
 * Index 0 (src_clk) is the rate-setting clock throughout this file.
 */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
63
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080064/* Update the elapsed time at a particular clock level
65 * if the device is active(on_time = true).Otherwise
66 * store it as sleep time.
67 */
/* Update the elapsed time at a particular clock level
 * if the device is active(on_time = true).Otherwise
 * store it as sleep time.
 *
 * Accumulates the interval since the last sample into clk_stats:
 * total elapsed time always, plus per-level on-time when active or
 * the last (lowest) level's bucket when asleep.  Restarts the sample
 * window at the end.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	/* First sample after a reset: open the window now. */
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		/* Sleep time is charged to the last (idle) bucket. */
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	clkstats->start = ktime_get();
}
87
/*
 * kgsl_pwrctrl_pwrlevel_change - switch the GPU to a new power level.
 * @device: KGSL device
 * @new_level: target index into pwr->pwrlevels
 *
 * No-op unless @new_level is in range, not hotter than the thermal
 * cap, and actually different from the current level.  Steps the core
 * clock one level at a time, then updates the bus vote.  Caller must
 * hold device->mutex (all callers in this file do).
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		int diff = new_level - pwr->active_pwrlevel;
		int d = (diff > 0) ? 1 : -1;	/* step direction */
		int level = pwr->active_pwrlevel;
		/* Update the clock stats */
		update_clk_statistics(device, true);
		/* Finally set active level */
		pwr->active_pwrlevel = new_level;
		/* Only touch the clock if it is running (or napping). */
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device);

			/* Don't shift by more than one level at a time to
			 * avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[level].gpu_freq);
			}
		}
		/* Re-vote the bus bandwidth for the new level. */
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
134
135static int __gpuclk_store(int max, struct device *dev,
136 struct device_attribute *attr,
137 const char *buf, size_t count)
138{ int ret, i, delta = 5000000;
139 unsigned long val;
140 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600141 struct kgsl_pwrctrl *pwr;
142
143 if (device == NULL)
144 return 0;
145 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146
147 ret = sscanf(buf, "%ld", &val);
148 if (ret != 1)
149 return count;
150
151 mutex_lock(&device->mutex);
Lucille Sylvesterb7626dc62012-06-28 18:46:24 -0600152 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
154 if (max)
155 pwr->thermal_pwrlevel = i;
156 break;
157 }
158 }
159
Lucille Sylvesterb7626dc62012-06-28 18:46:24 -0600160 if (i == (pwr->num_pwrlevels - 1))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161 goto done;
162
163 /*
164 * If the current or requested clock speed is greater than the
165 * thermal limit, bump down immediately.
166 */
167
168 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
169 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
170 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
171 else if (!max)
172 kgsl_pwrctrl_pwrlevel_change(device, i);
173
174done:
175 mutex_unlock(&device->mutex);
176 return count;
177}
178
/* sysfs store for max_gpuclk: sets the thermal frequency cap. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
185
/* sysfs show for max_gpuclk: frequency (Hz) of the thermal cap level. */
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
198
/* sysfs store for gpuclk: requests an immediate frequency change. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
205
/* sysfs show for gpuclk: frequency (Hz) of the active power level. */
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
218
/*
 * sysfs store for pwrnap: "1" enables and "0" disables the NAP power
 * state; any other value is silently ignored.
 */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy into a bounded, NUL-terminated buffer before parsing. */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
250
/* sysfs show for pwrnap: 1 if NAP is allowed, 0 otherwise. */
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
260
261
/*
 * sysfs store for idle_timer: accepts a timeout in milliseconds and
 * stores it (converted to jiffies) as the idle-check interval.  The
 * first stored value is remembered as a floor; later writes below the
 * original timeout are ignored.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* NOTE(review): 1000/HZ is 0 when HZ > 1000, which would make
	 * the division below fault — assumes HZ <= 1000 on these
	 * targets; confirm against the platform config. */
	const long div = 1000/HZ;
	/* Sentinel 1 means "not captured yet"; the first write latches
	 * the boot-time timeout as the minimum. */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Copy into a bounded, NUL-terminated buffer before parsing. */
	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
298
/* sysfs show for idle_timer: current interval (stored in jiffies). */
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.interval_timeout);
}
309
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700310static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
311 struct device_attribute *attr,
312 char *buf)
313{
314 int ret;
315 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800316 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
317 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
318 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700319 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800320 clkstats->on_time_old = 0;
321 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700322 }
323 return ret;
324}
325
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800326static int kgsl_pwrctrl_gputop_show(struct device *dev,
327 struct device_attribute *attr,
328 char *buf)
329{
330 int ret;
331 struct kgsl_device *device = kgsl_device_from_dev(dev);
332 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
333 int i = 0;
334 char *ptr = buf;
335
336 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
337 clkstats->elapsed_old);
338 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
339 i++, ptr += ret)
340 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
341 clkstats->old_clock_time[i]);
342
343 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
344 clkstats->on_time_old = 0;
345 clkstats->elapsed_old = 0;
346 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
347 clkstats->old_clock_time[i] = 0;
348 }
349 return (unsigned int) (ptr - buf);
350}
351
/* sysfs attributes exposed under the KGSL device node. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);

/* NULL-terminated list consumed by the create/remove helpers below. */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	NULL
};

/* Register all power-control sysfs files for @device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}

/* Remove the power-control sysfs files registered above. */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
382
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800383static void update_statistics(struct kgsl_device *device)
384{
385 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
386 unsigned int on_time = 0;
387 int i;
388 int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
389 /*PER CLK TIME*/
390 for (i = 0; i < num_pwrlevels; i++) {
391 clkstats->old_clock_time[i] = clkstats->clock_time[i];
392 on_time += clkstats->clock_time[i];
393 clkstats->clock_time[i] = 0;
394 }
395 clkstats->old_clock_time[num_pwrlevels] =
396 clkstats->clock_time[num_pwrlevels];
397 clkstats->clock_time[num_pwrlevels] = 0;
398 clkstats->on_time_old = on_time;
399 clkstats->elapsed_old = clkstats->elapsed;
400 clkstats->elapsed = 0;
401}
402
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	/* Accumulate the interval since the previous sample. */
	update_clk_statistics(device, on_time);
	/* Update the output regularly and reset the counters. */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}
415
/*
 * kgsl_pwrctrl_clk - gate the GPU core clocks on or off.
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 * @requested_state: target device state; NAP skips the expensive
 * unprepare/min-rate step so re-wake stays fast.
 *
 * The CLK_ON bit in power_flags makes this idempotent.  Order is
 * deliberate: on the off path disable first, then drop the rate and
 * unprepare; on the on path prepare and set the rate before enabling
 * so the GPU interrupt can arrive only once clocks are stable.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Park the source clock at the lowest rate. */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP)) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
			}

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700464
/*
 * kgsl_pwrctrl_axi - gate the AXI/bus bandwidth vote on or off.
 *
 * Idempotent via the AXI_ON bit.  Drives either the EBI1 clock or the
 * msm_bus scaling client (whichever the target provides); off drops
 * the vote to zero, on restores the active power level's bus_freq.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498
/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power rails on or off.
 *
 * Idempotent via the POWER_ON bit.  Enable order is core rail
 * (gpu_reg) then cx rail (gpu_cx); disable runs in the reverse
 * order.  Enable failures are logged but not propagated — callers
 * have no error path here.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
							"core regulator_enable "
							"failed: %d\n",
							status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
							"cx regulator_enable "
							"failed: %d\n",
							status);
			}
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700535
/*
 * kgsl_pwrctrl_irq - enable or disable the GPU interrupt line.
 *
 * Idempotent via the IRQ_ON bit.  Uses disable_irq_nosync() from
 * interrupt context because disable_irq() waits for running handlers
 * and would deadlock when called from the GPU's own ISR.
 */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
558
/*
 * kgsl_pwrctrl_init - one-time power-control setup from platform data.
 *
 * Acquires clocks, regulators and the bus-scaling client, rounds the
 * platform's requested frequencies to real clock rates, and records
 * the power-level table.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error paths (clk_err/done) return without
 * releasing clocks/regulators acquired earlier in this function —
 * presumably the caller invokes kgsl_pwrctrl_close() on failure;
 * verify, otherwise these leak.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	/* Round each requested GPU frequency to a rate the source
	 * clock can actually produce; 0 means "do not set_rate". */
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* Regulators are optional; a lookup failure leaves them NULL. */
	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	/* The cx rail is only meaningful when the core rail exists. */
	if (pwr->gpu_reg) {
		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
		if (IS_ERR(pwr->gpu_cx))
			pwr->gpu_cx = NULL;
	} else
		pwr->gpu_cx = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}


	pm_runtime_enable(device->parentdev);
	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
660
/*
 * kgsl_pwrctrl_close - release everything kgsl_pwrctrl_init acquired:
 * runtime PM, early suspend hook, bus client, regulators and clocks.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	/* Index 0 may alias grp_clks[1] (see init), so only put 1..N
	 * and clear the alias afterwards. */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
697
/*
 * kgsl_idle_check - workqueue handler driven by the idle timer.
 *
 * Tries to put an ACTIVE/NAP device to sleep; if the GPU is still
 * busy, re-arms the timer and periodically refreshes the busy stats
 * so sysfs reflects sustained activity.  For HUNG/recovery states it
 * just clears any pending state request.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device, 0);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy: try again after the idle interval. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is accurately reflected in the % busy numbers. */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
				UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
730
/*
 * kgsl_timer - idle timer callback (timer/softirq context).
 *
 * Requests SLUMBER or SLEEP depending on configuration, then defers
 * the actual transition to kgsl_idle_check() on the workqueue since
 * it needs the device mutex.  Skipped while a suspend is pending.
 */
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.restore_slumber ||
			device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}
746
/*
 * kgsl_pre_hwaccess - make sure the GPU is powered before a register
 * access.  Wakes a sleeping device, waits out a suspend, and logs an
 * error for states where clocks may legitimately be off.  Caller must
 * hold device->mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* Access is fine if clocks happen to be on; otherwise
		 * flag the caller's mistake but carry on. */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					 device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
779
/*
 * kgsl_check_suspended - block until the device is usable again.
 *
 * Drops device->mutex while waiting on the suspend or recovery gate
 * (holding it would deadlock the resume path), then reacquires it.
 * A slumbering device is simply woken.
 */
void kgsl_check_suspended(struct kgsl_device *device)
{
	if (device->requested_state == KGSL_STATE_SUSPEND ||
		device->state == KGSL_STATE_SUSPEND) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->hwaccess_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->recovery_gate);
		mutex_lock(&device->mutex);
	} else if (device->state == KGSL_STATE_SLUMBER)
		kgsl_pwrctrl_wake(device);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794
/*
 * _nap() - move the device from ACTIVE into NAP (irq and core clocks
 * off; power rail stays up).  Caller must hold the device mutex.
 *
 * Returns 0 on success, -EBUSY if the hardware is not yet idle.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			/* Hardware still busy: drop the request. */
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through - already in NAP or deeper needs no work */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		/* No legal transition from this state; drop the request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
817
/*
 * _sleep_accounting() - reset busy and clock statistics and notify the
 * pwrscale layer before the device enters a low-power state.
 */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	/* Clock-statistics window restarts from zero on next wakeup. */
	device->pwrctrl.clk_stats.start = ktime_set(0, 0);
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
826
/*
 * _sleep() - move the device into SLEEP: irq, AXI and core clocks off,
 * core clock parked at the lowest power level.  Caller must hold the
 * device mutex.
 *
 * Returns 0 on success, -EBUSY if the hardware is not yet idle.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			/* Hardware still busy: drop the request. */
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		/* Drop the core clock to the lowest (last) power level. */
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		/* Release the DMA latency constraint held while active. */
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		/* Already at or below the requested state. */
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
861
/*
 * _slumber() - move the device into SLUMBER, the deepest idle state
 * short of suspend: context is saved and the hardware is stopped.
 * Caller must hold the device mutex.
 *
 * Returns 0 on success, -EBUSY if the hardware is not yet idle.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			/* Hardware still busy: drop the request. */
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		/* Release the DMA latency constraint held while active. */
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		/* Already there; nothing to do. */
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700891
892/******************************************************************/
893/* Caller must hold the device mutex. */
894int kgsl_pwrctrl_sleep(struct kgsl_device *device)
895{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700896 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700897 KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
898
899 /* Work through the legal state transitions */
Jeremy Gebben388c2972011-12-16 09:05:07 -0700900 switch (device->requested_state) {
901 case KGSL_STATE_NAP:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700902 status = _nap(device);
903 break;
904 case KGSL_STATE_SLEEP:
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700905 status = _sleep(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700906 break;
907 case KGSL_STATE_SLUMBER:
908 status = _slumber(device);
909 break;
910 default:
911 KGSL_PWR_INFO(device, "bad state request 0x%x\n",
912 device->requested_state);
913 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
914 status = -EINVAL;
915 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700916 }
Suman Tatiraju24569022011-10-27 11:11:12 -0700917 return status;
918}
Jeremy Gebben388c2972011-12-16 09:05:07 -0700919EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700920
/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	/* Cascade upward: each case undoes what its state turned off,
	 * then falls through to the next shallower state's work. */
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		/* Hardware was fully stopped; restart it. */
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
			jiffies + device->pwrctrl.interval_timeout);
		/* Re-impose the DMA latency constraint while active. */
		pm_qos_update_request(&device->pm_qos_req_dma,
					GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
961
/*
 * kgsl_pwrctrl_enable() - power up the device: rail first, then core
 * clocks, then the AXI bus.  Mirror of kgsl_pwrctrl_disable().
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
970
/*
 * kgsl_pwrctrl_disable() - power down the device in the reverse order
 * of kgsl_pwrctrl_enable(): AXI bus, core clocks, then the rail.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700979
/*
 * kgsl_pwrctrl_set_state() - commit a power-state transition: emit the
 * trace event, record the new state and clear any pending request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
987
988void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
989{
990 if (state != KGSL_STATE_NONE && state != device->requested_state)
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700991 trace_kgsl_pwr_request_state(device, state);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700992 device->requested_state = state;
993}
994EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700995
996const char *kgsl_pwrstate_to_str(unsigned int state)
997{
998 switch (state) {
999 case KGSL_STATE_NONE:
1000 return "NONE";
1001 case KGSL_STATE_INIT:
1002 return "INIT";
1003 case KGSL_STATE_ACTIVE:
1004 return "ACTIVE";
1005 case KGSL_STATE_NAP:
1006 return "NAP";
1007 case KGSL_STATE_SLEEP:
1008 return "SLEEP";
1009 case KGSL_STATE_SUSPEND:
1010 return "SUSPEND";
1011 case KGSL_STATE_HUNG:
1012 return "HUNG";
1013 case KGSL_STATE_DUMP_AND_RECOVER:
1014 return "DNR";
1015 case KGSL_STATE_SLUMBER:
1016 return "SLUMBER";
1017 default:
1018 break;
1019 }
1020 return "UNKNOWN";
1021}
1022EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1023