blob: 739dcb5d12d1ede70b5847672b011e15b4558e5b [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
Jeremy Gebbenb46f4152011-10-14 14:27:00 -060027#define KGSL_PWRFLAGS_POWER_ON 0
28#define KGSL_PWRFLAGS_CLK_ON 1
29#define KGSL_PWRFLAGS_AXI_ON 2
30#define KGSL_PWRFLAGS_IRQ_ON 3
31
Lucille Sylvester10297892012-02-27 13:54:47 -070032#define GPU_SWFI_LATENCY 3
Suman Tatiraju7fe62a32011-07-14 16:40:37 -070033#define UPDATE_BUSY_VAL 1000000
34#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
/*
 * Table mapping each optional KGSL clock name (looked up via clk_get() in
 * kgsl_pwrctrl_init()) to the KGSL_CLK_* bit that enables it in the
 * platform data's clk_map.
 */
struct clk_pair {
	const char *name;	/* clock name passed to clk_get() */
	uint map;		/* KGSL_CLK_* selector bit in pdata->clk_map */
};

struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
63
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080064/* Update the elapsed time at a particular clock level
65 * if the device is active(on_time = true).Otherwise
66 * store it as sleep time.
67 */
68static void update_clk_statistics(struct kgsl_device *device,
69 bool on_time)
70{
71 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
72 struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
73 ktime_t elapsed;
74 int elapsed_us;
75 if (clkstats->start.tv64 == 0)
76 clkstats->start = ktime_get();
77 clkstats->stop = ktime_get();
78 elapsed = ktime_sub(clkstats->stop, clkstats->start);
79 elapsed_us = ktime_to_us(elapsed);
80 clkstats->elapsed += elapsed_us;
81 if (on_time)
82 clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
83 else
84 clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
85 clkstats->start = ktime_get();
86}
87
/*
 * Move the GPU to a new power level.  The request is clamped: it must be
 * below the sleep level (num_pwrlevels - 1), at or below the thermal
 * ceiling, and different from the current level.  Clock rate changes are
 * stepped one level at a time and the bus vote is updated to match.
 * Caller is expected to hold device->mutex.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		int diff = new_level - pwr->active_pwrlevel;
		int d = (diff > 0) ? 1 : -1;	/* step direction */
		int level = pwr->active_pwrlevel;
		/* Update the clock stats */
		update_clk_statistics(device, true);
		/* Finally set active level */
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device);

			/* Don't shift by more than one level at a time to
			 * avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels[level].gpu_freq);
			}
		}
		/* Re-vote the bus bandwidth for the new level. */
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				    pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
134
135static int __gpuclk_store(int max, struct device *dev,
136 struct device_attribute *attr,
137 const char *buf, size_t count)
138{ int ret, i, delta = 5000000;
139 unsigned long val;
140 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600141 struct kgsl_pwrctrl *pwr;
142
143 if (device == NULL)
144 return 0;
145 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146
147 ret = sscanf(buf, "%ld", &val);
148 if (ret != 1)
149 return count;
150
151 mutex_lock(&device->mutex);
Lucille Sylvesterb7626dc62012-06-28 18:46:24 -0600152 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
154 if (max)
155 pwr->thermal_pwrlevel = i;
156 break;
157 }
158 }
159
Lucille Sylvesterb7626dc62012-06-28 18:46:24 -0600160 if (i == (pwr->num_pwrlevels - 1))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161 goto done;
162
163 /*
164 * If the current or requested clock speed is greater than the
165 * thermal limit, bump down immediately.
166 */
167
168 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
169 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
170 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
171 else if (!max)
172 kgsl_pwrctrl_pwrlevel_change(device, i);
173
174done:
175 mutex_unlock(&device->mutex);
176 return count;
177}
178
/* sysfs store for max_gpuclk: set the thermal ceiling frequency. */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
185
/* sysfs show for max_gpuclk: report the thermal ceiling frequency in Hz. */
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
198
/* sysfs store for gpuclk: change the active GPU clock frequency. */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
205
/* sysfs show for gpuclk: report the active power level's frequency in Hz. */
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
218
219static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
220 struct device_attribute *attr,
221 const char *buf, size_t count)
222{
223 char temp[20];
224 unsigned long val;
225 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600226 struct kgsl_pwrctrl *pwr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227 int rc;
228
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600229 if (device == NULL)
230 return 0;
231 pwr = &device->pwrctrl;
232
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233 snprintf(temp, sizeof(temp), "%.*s",
234 (int)min(count, sizeof(temp) - 1), buf);
235 rc = strict_strtoul(temp, 0, &val);
236 if (rc)
237 return rc;
238
239 mutex_lock(&device->mutex);
240
241 if (val == 1)
242 pwr->nap_allowed = true;
243 else if (val == 0)
244 pwr->nap_allowed = false;
245
246 mutex_unlock(&device->mutex);
247
248 return count;
249}
250
/* sysfs show for pwrnap: report whether napping is allowed (0 or 1). */
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
260
261
/*
 * sysfs store for idle_timer: set the idle timeout, requested in
 * milliseconds and converted to jiffies.  Values below the original
 * (boot-time) timeout are rejected silently.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* ms -> jiffies divisor; assumes HZ <= 1000 — TODO confirm for target */
	const long div = 1000/HZ;
	/*
	 * NOTE(review): static, so the first device to reach this handler
	 * fixes the floor for every kgsl device — verify this is intended.
	 */
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	snprintf(temp, sizeof(temp), "%.*s",
		 (int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	/* Remember the boot-time timeout as the minimum allowed value. */
	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
298
/* sysfs show for idle_timer: report the idle timeout (in jiffies). */
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.interval_timeout);
}
309
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700310static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
311 struct device_attribute *attr,
312 char *buf)
313{
314 int ret;
315 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800316 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
317 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
318 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700319 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800320 clkstats->on_time_old = 0;
321 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700322 }
323 return ret;
324}
325
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800326static int kgsl_pwrctrl_gputop_show(struct device *dev,
327 struct device_attribute *attr,
328 char *buf)
329{
330 int ret;
331 struct kgsl_device *device = kgsl_device_from_dev(dev);
332 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
333 int i = 0;
334 char *ptr = buf;
335
336 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
337 clkstats->elapsed_old);
338 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
339 i++, ptr += ret)
340 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
341 clkstats->old_clock_time[i]);
342
343 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
344 clkstats->on_time_old = 0;
345 clkstats->elapsed_old = 0;
346 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
347 clkstats->old_clock_time[i] = 0;
348 }
349 return (unsigned int) (ptr - buf);
350}
351
Anshuman Dani91ede1e2012-08-21 14:44:38 +0530352static int kgsl_pwrctrl_gpu_available_frequencies_show(
353 struct device *dev,
354 struct device_attribute *attr,
355 char *buf)
356{
357 struct kgsl_device *device = kgsl_device_from_dev(dev);
358 struct kgsl_pwrctrl *pwr;
359 int index, num_chars = 0;
360
361 if (device == NULL)
362 return 0;
363 pwr = &device->pwrctrl;
364 for (index = 0; index < pwr->num_pwrlevels - 1; index++)
365 num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
366 pwr->pwrlevels[index].gpu_freq);
367 buf[num_chars++] = '\n';
368 return num_chars;
369}
370
/* sysfs attribute definitions; read-only stats use 0444, tunables 0644. */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384
/* NULL-terminated list of the attributes registered in init_sysfs(). */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	NULL
};
395
/* Create the power-control sysfs attributes for @device. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
400
/* Remove the power-control sysfs attributes created by init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
405
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800406static void update_statistics(struct kgsl_device *device)
407{
408 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
409 unsigned int on_time = 0;
410 int i;
411 int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
412 /*PER CLK TIME*/
413 for (i = 0; i < num_pwrlevels; i++) {
414 clkstats->old_clock_time[i] = clkstats->clock_time[i];
415 on_time += clkstats->clock_time[i];
416 clkstats->clock_time[i] = 0;
417 }
418 clkstats->old_clock_time[num_pwrlevels] =
419 clkstats->clock_time[num_pwrlevels];
420 clkstats->clock_time[num_pwrlevels] = 0;
421 clkstats->on_time_old = on_time;
422 clkstats->elapsed_old = clkstats->elapsed;
423 clkstats->elapsed = 0;
424}
425
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700426/* Track the amount of time the gpu is on vs the total system time. *
427 * Regularly update the percentage of busy time displayed by sysfs. */
428static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
429{
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800430 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
431 update_clk_statistics(device, on_time);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700432 /* Update the output regularly and reset the counters. */
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800433 if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700434 !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800435 update_statistics(device);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700436 }
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700437}
438
/*
 * Gate the GPU group clocks on or off.  The power_flags bit makes the
 * transitions idempotent.  On the OFF path, clk_unprepare (the
 * high-latency half) is skipped when only napping; on the ON path,
 * clk_prepare and the rate restore are skipped when waking from nap.
 * grp_clks[0] (the source clock) is never disabled here, only re-rated.
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Drop to the lowest rate before unprepare. */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		} else if (requested_state == KGSL_STATE_SLEEP) {
			/*
			 * Clocks were already disabled (e.g. by a prior nap);
			 * finish the high-latency half for a full sleep.
			 */
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0))
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_unprepare(pwr->grp_clks[i]);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if (device->state != KGSL_STATE_NAP) {
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);

				/* Restore the rate of the active level. */
				if (pwr->pwrlevels[0].gpu_freq > 0)
					clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels
						[pwr->active_pwrlevel].
						gpu_freq);
			}
			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700497
/*
 * Turn the AXI (bus) path on or off: gate the EBI1 clock and update the
 * bus-scale vote.  The power_flags bit makes the transitions idempotent.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				/* Drop the rate to 0 before gating. */
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			/* Withdraw the bandwidth vote. */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
								    0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				/* Restore the rate for the active level. */
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700531
/*
 * Switch the GPU power rails.  The core rail (gpu_reg) is enabled before
 * the CX rail (gpu_cx) and disabled after it.  Enable failures are
 * logged but not propagated.  The power_flags bit makes the transitions
 * idempotent.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
							"core regulator_enable "
							"failed: %d\n",
							status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
							"cx regulator_enable "
							"failed: %d\n",
							status);
			}
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700568
569void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
570{
571 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
572
573 if (state == KGSL_PWRFLAGS_ON) {
574 if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
575 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700576 trace_kgsl_irq(device, state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700577 enable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578 }
579 } else if (state == KGSL_PWRFLAGS_OFF) {
580 if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
581 &pwr->power_flags)) {
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700582 trace_kgsl_irq(device, state);
Jordan Crouseb58e61b2011-08-08 13:25:36 -0600583 if (in_interrupt())
584 disable_irq_nosync(pwr->interrupt_num);
585 else
586 disable_irq(pwr->interrupt_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587 }
588 }
589}
590EXPORT_SYMBOL(kgsl_pwrctrl_irq);
591
/*
 * One-time power-control setup at probe: acquire the group clocks listed
 * in the platform data's clk_map, build the power-level table (rounding
 * each frequency through the source clock), grab the regulators and bus
 * clock, register the bus-scale client, and enable runtime PM and early
 * suspend.  Returns 0 on success or a negative errno.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	/* Round each requested frequency through the source clock. */
	for (i = 0; i < pdata->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* Regulators are optional; NULL them out when absent. */
	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	/* The CX rail is only looked up when the core rail exists. */
	if (pwr->gpu_reg) {
		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
		if (IS_ERR(pwr->gpu_cx))
			pwr->gpu_cx = NULL;
	} else
		pwr->gpu_cx = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}


	pm_runtime_enable(device->parentdev);
	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clks[i].name, result);

done:
	return result;
}
693
/*
 * Tear down everything kgsl_pwrctrl_init() acquired: runtime PM, early
 * suspend, bus clock, bus-scale client, regulators, and group clocks.
 */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	/*
	 * Start at 1: slot 0 may alias slot 1 (see init's source-clock
	 * fallback), so it is only cleared, never clk_put() twice.
	 */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
730
/*
 * Deferred idle-timer work: try to put an ACTIVE or NAP device to sleep.
 * If the GPU is still too busy to sleep, re-arm the idle timer and bump
 * the no-nap counter so the busy statistics stay accurate.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device, 0);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Sleep refused: try again after another interval. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
							 UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Nothing to do while hung/recovering; drop the request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
763
/*
 * Idle timer callback: request SLUMBER (or SLEEP, depending on the
 * configuration) and defer the actual transition to the idle-check work
 * item, since the state change cannot be made in timer (interrupt)
 * context.
 */
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.restore_slumber ||
					device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}
779
/*
 * kgsl_pre_hwaccess() - make sure the GPU is reachable before a register
 * access.  Caller must hold the device mutex.
 *
 * ACTIVE needs nothing.  NAP/SLEEP/SLUMBER are woken.  SUSPEND blocks
 * until the suspend completes.  INIT/HUNG/DUMP_AND_RECOVER are allowed
 * through silently only if the clocks happen to be on (recovery paths
 * touch the hardware deliberately); otherwise an error is logged but
 * the access is still permitted.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
			&device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
				"hw access while clocks off from state %d\n",
				device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
			device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
812
813void kgsl_check_suspended(struct kgsl_device *device)
814{
815 if (device->requested_state == KGSL_STATE_SUSPEND ||
816 device->state == KGSL_STATE_SUSPEND) {
817 mutex_unlock(&device->mutex);
818 wait_for_completion(&device->hwaccess_gate);
819 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -0700820 } else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700821 mutex_unlock(&device->mutex);
822 wait_for_completion(&device->recovery_gate);
823 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -0700824 } else if (device->state == KGSL_STATE_SLUMBER)
825 kgsl_pwrctrl_wake(device);
826}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700827
/*
 * _nap() - drop the GPU into NAP: irqs and core clocks off, power rail
 * and memory state retained for a fast wakeup.
 *
 * Returns -EBUSY if the GPU is still busy (the requested state is then
 * cleared), 0 otherwise.  States at or below NAP are left untouched.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through - now at NAP, nothing more to do */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
850
851static void
852_sleep_accounting(struct kgsl_device *device)
853{
854 kgsl_pwrctrl_busy_time(device, false);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800855 device->pwrctrl.clk_stats.start = ktime_set(0, 0);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700856 device->pwrctrl.time = 0;
857 kgsl_pwrscale_sleep(device);
858}
859
/*
 * _sleep() - drop the GPU into SLEEP: irqs, AXI bus and core clocks
 * off, busy statistics closed out, pm_qos latency request relaxed.
 *
 * Returns -EBUSY if the GPU is still busy (the requested state is then
 * cleared), 0 otherwise.  SLEEP and SLUMBER are left untouched.
 */
static int
_sleep(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
889
/*
 * _slumber() - drop the GPU into SLUMBER, the deepest idle state: the
 * idle timer is cancelled, the current context is suspended and the
 * core stopped, statistics are closed out and the pm_qos latency
 * request relaxed.  Waking from here requires a full ->start().
 *
 * Returns -EBUSY if the GPU is still busy (the requested state is then
 * cleared), 0 otherwise.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700919
920/******************************************************************/
921/* Caller must hold the device mutex. */
922int kgsl_pwrctrl_sleep(struct kgsl_device *device)
923{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700924 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700925 KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
926
927 /* Work through the legal state transitions */
Jeremy Gebben388c2972011-12-16 09:05:07 -0700928 switch (device->requested_state) {
929 case KGSL_STATE_NAP:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700930 status = _nap(device);
931 break;
932 case KGSL_STATE_SLEEP:
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700933 status = _sleep(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700934 break;
935 case KGSL_STATE_SLUMBER:
936 status = _slumber(device);
937 break;
938 default:
939 KGSL_PWR_INFO(device, "bad state request 0x%x\n",
940 device->requested_state);
941 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
942 status = -EINVAL;
943 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700944 }
Suman Tatiraju24569022011-10-27 11:11:12 -0700945 return status;
946}
Jeremy Gebben388c2972011-12-16 09:05:07 -0700947EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700948
/******************************************************************/
/* Caller must hold the device mutex. */
/*
 * kgsl_pwrctrl_wake() - bring the GPU back to ACTIVE from any
 * low-power state.  The cases cascade so each deeper state performs
 * its extra restore work and then falls into the shallower one:
 * SLUMBER restarts the core, SLEEP restores the AXI bus and pwrscale,
 * NAP re-enables the clocks, state, irqs, idle timer and pm_qos
 * latency request.  A failed ->start() from SLUMBER aborts the wake.
 */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
	switch (device->state) {
	case KGSL_STATE_SLUMBER:
		status = device->ftbl->start(device, 0);
		if (status) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			KGSL_DRV_ERR(device, "start failed %d\n", status);
			break;
		}
		/* fall through */
	case KGSL_STATE_SLEEP:
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
		kgsl_pwrscale_wake(device);
		/* fall through */
	case KGSL_STATE_NAP:
		/* Turn on the core clocks */
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
		/* Enable state before turning on irq */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
		/* Re-enable HW access */
		mod_timer(&device->idle_timer,
				jiffies + device->pwrctrl.interval_timeout);
		pm_qos_update_request(&device->pm_qos_req_dma,
					GPU_SWFI_LATENCY);
		/* fall through */
	case KGSL_STATE_ACTIVE:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
989
/*
 * kgsl_pwrctrl_enable() - unconditionally power up the GPU in bring-up
 * order: power rail first, then core clocks, then the AXI bus.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
998
/*
 * kgsl_pwrctrl_disable() - unconditionally power down the GPU in the
 * reverse of the enable order: AXI bus, then core clocks, then rail.
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001007
/*
 * kgsl_pwrctrl_set_state() - commit a power-state transition: record
 * the new state and clear any pending request.  The trace event is
 * emitted before the assignment so it can observe the transition.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
1015
1016void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
1017{
1018 if (state != KGSL_STATE_NONE && state != device->requested_state)
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001019 trace_kgsl_pwr_request_state(device, state);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001020 device->requested_state = state;
1021}
1022EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001023
1024const char *kgsl_pwrstate_to_str(unsigned int state)
1025{
1026 switch (state) {
1027 case KGSL_STATE_NONE:
1028 return "NONE";
1029 case KGSL_STATE_INIT:
1030 return "INIT";
1031 case KGSL_STATE_ACTIVE:
1032 return "ACTIVE";
1033 case KGSL_STATE_NAP:
1034 return "NAP";
1035 case KGSL_STATE_SLEEP:
1036 return "SLEEP";
1037 case KGSL_STATE_SUSPEND:
1038 return "SUSPEND";
1039 case KGSL_STATE_HUNG:
1040 return "HUNG";
1041 case KGSL_STATE_DUMP_AND_RECOVER:
1042 return "DNR";
1043 case KGSL_STATE_SLUMBER:
1044 return "SLUMBER";
1045 default:
1046 break;
1047 }
1048 return "UNKNOWN";
1049}
1050EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1051