blob: e56510605c3f3e887982b24329ebeefe09985146 [file] [log] [blame]
Jeremy Gebbenb7bc9552012-01-09 13:32:49 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013
14#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/interrupt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070016#include <asm/page.h>
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -070017#include <linux/pm_runtime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <mach/msm_iomap.h>
19#include <mach/msm_bus.h>
Suman Tatiraju2bdd0562012-01-26 14:49:46 -080020#include <linux/ktime.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_pwrscale.h"
24#include "kgsl_device.h"
Jeremy Gebbenb50f3312011-12-16 08:58:33 -070025#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
/* Bit positions within kgsl_pwrctrl.power_flags */
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3

/* Latency hint used for software-initiated idle; units not visible here */
#define GPU_SWFI_LATENCY 3
/* Elapsed-time threshold (us, see update_clk_statistics) that triggers a
 * refresh of the sysfs busy statistics */
#define UPDATE_BUSY_VAL 1000000
/* no_nap_cnt threshold before busy stats are force-updated (kgsl_idle_check) */
#define UPDATE_BUSY 50
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
/*
 * Maps a clock consumer name (as passed to clk_get()) to the platform-data
 * clk_map bit that says whether the target actually uses that clock.
 */
struct clk_pair {
	const char *name;
	uint map;
};

/* Table walked by kgsl_pwrctrl_init() when acquiring the GPU clocks */
struct clk_pair clks[KGSL_MAX_CLKS] = {
	{
		.name = "src_clk",
		.map = KGSL_CLK_SRC,
	},
	{
		.name = "core_clk",
		.map = KGSL_CLK_CORE,
	},
	{
		.name = "iface_clk",
		.map = KGSL_CLK_IFACE,
	},
	{
		.name = "mem_clk",
		.map = KGSL_CLK_MEM,
	},
	{
		.name = "mem_iface_clk",
		.map = KGSL_CLK_MEM_IFACE,
	},
};
63
/* Update the elapsed time at a particular clock level
 * if the device is active (on_time == true). Otherwise
 * store it as sleep time in the last clock_time slot.
 */
static void update_clk_statistics(struct kgsl_device *device,
				bool on_time)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
	ktime_t elapsed;
	int elapsed_us;
	/* First call after a reset: open the measurement window now */
	if (clkstats->start.tv64 == 0)
		clkstats->start = ktime_get();
	clkstats->stop = ktime_get();
	elapsed = ktime_sub(clkstats->stop, clkstats->start);
	elapsed_us = ktime_to_us(elapsed);
	clkstats->elapsed += elapsed_us;
	if (on_time)
		clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
	else
		/* index num_pwrlevels - 1 is the slot used for sleep time */
		clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
	clkstats->start = ktime_get();
}
87
/*
 * Move the GPU to a new power level, stepping the core clock one level
 * at a time and re-voting bus bandwidth.  The request is honored only
 * if new_level is below the sleep slot, at or below the thermal cap,
 * and different from the current level.
 */
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	if (new_level < (pwr->num_pwrlevels - 1) &&
		new_level >= pwr->thermal_pwrlevel &&
		new_level != pwr->active_pwrlevel) {
		struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
		int diff = new_level - pwr->active_pwrlevel;
		int d = (diff > 0) ? 1 : -1;	/* per-step direction */
		int level = pwr->active_pwrlevel;
		/* Update the clock stats */
		update_clk_statistics(device, true);
		/* Finally set active level */
		pwr->active_pwrlevel = new_level;
		if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
			(device->state == KGSL_STATE_NAP)) {
			/*
			 * On some platforms, instability is caused on
			 * changing clock freq when the core is busy.
			 * Idle the gpu core before changing the clock freq.
			 */
			if (pwr->idle_needed == true)
				device->ftbl->idle(device);

			/* Don't shift by more than one level at a time to
			 * avoid glitches.
			 */
			while (level != new_level) {
				level += d;
				clk_set_rate(pwr->grp_clks[0],
						pwr->pwrlevels[level].gpu_freq);
			}
		}
		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
			/* Vote bandwidth through the bus-scale client when
			 * present, otherwise scale the EBI clock directly */
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
						pwrlevel->bus_freq);
			else if (pwr->ebi1_clk)
				clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
		}
		trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
				pwrlevel->gpu_freq);
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
134
135static int __gpuclk_store(int max, struct device *dev,
136 struct device_attribute *attr,
137 const char *buf, size_t count)
138{ int ret, i, delta = 5000000;
139 unsigned long val;
140 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600141 struct kgsl_pwrctrl *pwr;
142
143 if (device == NULL)
144 return 0;
145 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146
147 ret = sscanf(buf, "%ld", &val);
148 if (ret != 1)
149 return count;
150
151 mutex_lock(&device->mutex);
Lucille Sylvesterb7626dc62012-06-28 18:46:24 -0600152 for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153 if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
154 if (max)
155 pwr->thermal_pwrlevel = i;
156 break;
157 }
158 }
159
Lucille Sylvesterb7626dc62012-06-28 18:46:24 -0600160 if (i == (pwr->num_pwrlevels - 1))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161 goto done;
162
163 /*
164 * If the current or requested clock speed is greater than the
165 * thermal limit, bump down immediately.
166 */
167
168 if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
169 pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
170 kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
171 else if (!max)
172 kgsl_pwrctrl_pwrlevel_change(device, i);
173
174done:
175 mutex_unlock(&device->mutex);
176 return count;
177}
178
/* sysfs "max_gpuclk" store: sets the thermal frequency cap (max == 1). */
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __gpuclk_store(1, dev, attr, buf, count);
}
185
/* sysfs "max_gpuclk" show: reports the thermal-capped level's frequency. */
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
198
/* sysfs "gpuclk" store: requests an immediate level change (max == 0). */
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return __gpuclk_store(0, dev, attr, buf, count);
}
205
206static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
207 struct device_attribute *attr,
208 char *buf)
209{
210 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600211 struct kgsl_pwrctrl *pwr;
212 if (device == NULL)
213 return 0;
214 pwr = &device->pwrctrl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700215 return snprintf(buf, PAGE_SIZE, "%d\n",
216 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
217}
218
/*
 * sysfs "pwrnap" store: 1 enables entering NAP, 0 disables it.
 * Any other value is silently ignored.
 */
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Take a bounded, NUL-terminated copy of the input before parsing */
	snprintf(temp, sizeof(temp), "%.*s",
			(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	if (val == 1)
		pwr->nap_allowed = true;
	else if (val == 0)
		pwr->nap_allowed = false;

	mutex_unlock(&device->mutex);

	return count;
}
250
251static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
252 struct device_attribute *attr,
253 char *buf)
254{
255 struct kgsl_device *device = kgsl_device_from_dev(dev);
Jordan Crouse6c2992a2011-08-08 17:00:06 -0600256 if (device == NULL)
257 return 0;
258 return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259}
260
261
/*
 * sysfs "idle_timer" store: timeout written in ms, stored in jiffies.
 * Values shorter than the boot-time default are ignored.
 */
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	char temp[20];
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	/* NOTE(review): 1000/HZ truncates to 0 when HZ > 1000, which would
	 * make the division below fault — confirm HZ <= 1000 on all targets */
	const long div = 1000/HZ;
	static unsigned int org_interval_timeout = 1;
	int rc;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* Take a bounded, NUL-terminated copy of the input before parsing */
	snprintf(temp, sizeof(temp), "%.*s",
			(int)min(count, sizeof(temp) - 1), buf);
	rc = strict_strtoul(temp, 0, &val);
	if (rc)
		return rc;

	/* Latch the original timeout on first use as the lower bound */
	if (org_interval_timeout == 1)
		org_interval_timeout = pwr->interval_timeout;

	mutex_lock(&device->mutex);

	/* Let the timeout be requested in ms, but convert to jiffies. */
	val /= div;
	if (val >= org_interval_timeout)
		pwr->interval_timeout = val;

	mutex_unlock(&device->mutex);

	return count;
}
298
/* sysfs "idle_timer" show: reports the stored timeout (jiffies). */
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	if (device == NULL)
		return 0;
	return snprintf(buf, PAGE_SIZE, "%d\n",
		device->pwrctrl.interval_timeout);
}
309
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700310static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
311 struct device_attribute *attr,
312 char *buf)
313{
314 int ret;
315 struct kgsl_device *device = kgsl_device_from_dev(dev);
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800316 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
317 ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
318 clkstats->on_time_old, clkstats->elapsed_old);
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700319 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800320 clkstats->on_time_old = 0;
321 clkstats->elapsed_old = 0;
Suman Tatiraju7fe62a32011-07-14 16:40:37 -0700322 }
323 return ret;
324}
325
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800326static int kgsl_pwrctrl_gputop_show(struct device *dev,
327 struct device_attribute *attr,
328 char *buf)
329{
330 int ret;
331 struct kgsl_device *device = kgsl_device_from_dev(dev);
332 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
333 int i = 0;
334 char *ptr = buf;
335
336 ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
337 clkstats->elapsed_old);
338 for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
339 i++, ptr += ret)
340 ret = snprintf(ptr, PAGE_SIZE, "%7d ",
341 clkstats->old_clock_time[i]);
342
343 if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
344 clkstats->on_time_old = 0;
345 clkstats->elapsed_old = 0;
346 for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
347 clkstats->old_clock_time[i] = 0;
348 }
349 return (unsigned int) (ptr - buf);
350}
351
Anshuman Dani91ede1e2012-08-21 14:44:38 +0530352static int kgsl_pwrctrl_gpu_available_frequencies_show(
353 struct device *dev,
354 struct device_attribute *attr,
355 char *buf)
356{
357 struct kgsl_device *device = kgsl_device_from_dev(dev);
358 struct kgsl_pwrctrl *pwr;
359 int index, num_chars = 0;
360
361 if (device == NULL)
362 return 0;
363 pwr = &device->pwrctrl;
364 for (index = 0; index < pwr->num_pwrlevels - 1; index++)
365 num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
366 pwr->pwrlevels[index].gpu_freq);
367 buf[num_chars++] = '\n';
368 return num_chars;
369}
370
/* sysfs attributes exposed on the kgsl device node */
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
	kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
	kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
	NULL);
DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
	NULL);
DEVICE_ATTR(gpu_available_frequencies, 0444,
	kgsl_pwrctrl_gpu_available_frequencies_show,
	NULL);

/* NULL-terminated list handed to the sysfs create/remove helpers */
static const struct device_attribute *pwrctrl_attr_list[] = {
	&dev_attr_gpuclk,
	&dev_attr_max_gpuclk,
	&dev_attr_pwrnap,
	&dev_attr_idle_timer,
	&dev_attr_gpubusy,
	&dev_attr_gputop,
	&dev_attr_gpu_available_frequencies,
	NULL
};
395
/* Create the pwrctrl sysfs files on the kgsl device node. */
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
400
/* Remove the sysfs files created by kgsl_pwrctrl_init_sysfs(). */
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
405
Suman Tatiraju2bdd0562012-01-26 14:49:46 -0800406static void update_statistics(struct kgsl_device *device)
407{
408 struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
409 unsigned int on_time = 0;
410 int i;
411 int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
412 /*PER CLK TIME*/
413 for (i = 0; i < num_pwrlevels; i++) {
414 clkstats->old_clock_time[i] = clkstats->clock_time[i];
415 on_time += clkstats->clock_time[i];
416 clkstats->clock_time[i] = 0;
417 }
418 clkstats->old_clock_time[num_pwrlevels] =
419 clkstats->clock_time[num_pwrlevels];
420 clkstats->clock_time[num_pwrlevels] = 0;
421 clkstats->on_time_old = on_time;
422 clkstats->elapsed_old = clkstats->elapsed;
423 clkstats->elapsed = 0;
424}
425
/* Track the amount of time the gpu is on vs the total system time. *
 * Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
	struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
	update_clk_statistics(device, on_time);
	/* Update the output regularly and reset the counters. */
	if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
		!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
		update_statistics(device);
	}
}
438
/*
 * Gate or ungate the GPU core clocks.  requested_state distinguishes a
 * NAP transition (clocks stay prepared and the rate is untouched for a
 * fast wakeup) from deeper sleeps (rate dropped, clocks unprepared).
 */
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
					int requested_state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0;
	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_disable(pwr->grp_clks[i]);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(requested_state != KGSL_STATE_NAP)) {
				/* Deeper than NAP: drop to the lowest rate
				 * and unprepare all clocks */
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->num_pwrlevels - 1].
					gpu_freq);
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_unprepare(pwr->grp_clks[i]);
			}
			kgsl_pwrctrl_busy_time(device, true);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
			&pwr->power_flags)) {
			trace_kgsl_clk(device, state);
			/* High latency clock maintenance. */
			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
				(device->state != KGSL_STATE_NAP)) {
				/* Re-prepare and restore the active rate
				 * when waking from deeper than NAP */
				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
					if (pwr->grp_clks[i])
						clk_prepare(pwr->grp_clks[i]);
				clk_set_rate(pwr->grp_clks[0],
					pwr->pwrlevels[pwr->active_pwrlevel].
					gpu_freq);
			}

			/* as last step, enable grp_clk
			   this is to let GPU interrupt to come */
			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
				if (pwr->grp_clks[i])
					clk_enable(pwr->grp_clks[i]);
			kgsl_pwrctrl_busy_time(device, false);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487
/*
 * Enable or disable the GPU's bus (AXI/EBI) bandwidth vote, via either
 * the EBI clock or the msm_bus scale client, whichever is present.
 */
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_set_rate(pwr->ebi1_clk, 0);
				clk_disable_unprepare(pwr->ebi1_clk);
			}
			if (pwr->pcl)
				/* bandwidth vote of 0 = release the bus */
				msm_bus_scale_client_update_request(pwr->pcl,
								0);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
			&pwr->power_flags)) {
			trace_kgsl_bus(device, state);
			if (pwr->ebi1_clk) {
				clk_prepare_enable(pwr->ebi1_clk);
				clk_set_rate(pwr->ebi1_clk,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
			}
			if (pwr->pcl)
				msm_bus_scale_client_update_request(pwr->pcl,
					pwr->pwrlevels[pwr->active_pwrlevel].
					bus_freq);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521
/*
 * Switch the GPU power rails on or off.  Both regulators are optional
 * (NULL when absent).  Note the ordering: cx is dropped before the core
 * rail on the way down, and the core rail is enabled before cx on the
 * way up.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (pwr->gpu_cx)
				regulator_disable(pwr->gpu_cx);
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			/* Enable failures are logged but not propagated */
			if (pwr->gpu_reg) {
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
							"core regulator_enable "
							"failed: %d\n",
							status);
			}
			if (pwr->gpu_cx) {
				int status = regulator_enable(pwr->gpu_cx);
				if (status)
					KGSL_DRV_ERR(device,
							"cx regulator_enable "
							"failed: %d\n",
							status);
			}
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558
/* Enable or disable the GPU interrupt line, tracked by PWRFLAGS_IRQ_ON. */
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			enable_irq(pwr->interrupt_num);
		}
	} else if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
			&pwr->power_flags)) {
			trace_kgsl_irq(device, state);
			/* disable_irq() waits for running handlers and may
			 * sleep, so use the nosync variant in IRQ context */
			if (in_interrupt())
				disable_irq_nosync(pwr->interrupt_num);
			else
				disable_irq(pwr->interrupt_num);
		}
	}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
581
/*
 * One-time power bring-up at probe: acquire the clocks, regulators, bus
 * client and power-level table described by the platform data.
 * Returns 0 on success or a negative errno.
 */
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	/*acquire clocks */
	for (i = 0; i < KGSL_MAX_CLKS; i++) {
		if (pdata->clk_map & clks[i].map) {
			clk = clk_get(&pdev->dev, clks[i].name);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	if (pwr->grp_clks[0] == NULL)
		pwr->grp_clks[0] = pwr->grp_clks[1];

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata->set_grp_async != NULL)
		pdata->set_grp_async();

	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					pdata->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata->num_levels;
	pwr->active_pwrlevel = pdata->init_level;
	pwr->default_pwrlevel = pdata->init_level;
	for (i = 0; i < pdata->num_levels; i++) {
		/* Snap each requested frequency to what the clock framework
		 * can provide; 0 means "in sync with AXI", left as-is */
		pwr->pwrlevels[i].gpu_freq =
			(pdata->pwrlevel[i].gpu_freq > 0) ?
			clk_round_rate(pwr->grp_clks[0],
				pdata->pwrlevel[i].
				gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata->pwrlevel[i].bus_freq;
		pwr->pwrlevels[i].io_fraction =
			pdata->pwrlevel[i].io_fraction;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
			pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	/* Regulators are optional; NULL means "not present" */
	pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	/* The cx rail is only meaningful when the core rail exists */
	if (pwr->gpu_reg) {
		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
		if (IS_ERR(pwr->gpu_cx))
			pwr->gpu_cx = NULL;
	} else
		pwr->gpu_cx = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata->nap_allowed;
	pwr->idle_needed = pdata->idle_needed;
	pwr->interval_timeout = pdata->idle_timeout;
	pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
	pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
				pwr->pwrlevels[pwr->active_pwrlevel].
				bus_freq);
	if (pdata->bus_scale_table != NULL) {
		pwr->pcl = msm_bus_scale_register_client(pdata->
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
				"msm_bus_scale_register_client failed: "
				"id %d table %p", device->id,
				pdata->bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}


	pm_runtime_enable(device->parentdev);
	register_early_suspend(&device->display_off);
	return result;

clk_err:
	/* NOTE(review): clocks obtained before the failing clk_get are not
	 * released here; cleanup appears to be left to kgsl_pwrctrl_close()
	 * — confirm callers invoke it on probe failure */
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				clks[i].name, result);

done:
	return result;
}
683
/* Release everything acquired in kgsl_pwrctrl_init(). */
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	KGSL_PWR_INFO(device, "close device %d\n", device->id);

	pm_runtime_disable(device->parentdev);
	unregister_early_suspend(&device->display_off);

	clk_put(pwr->ebi1_clk);

	if (pwr->pcl)
		msm_bus_scale_unregister_client(pwr->pcl);

	pwr->pcl = 0;

	if (pwr->gpu_reg) {
		regulator_put(pwr->gpu_reg);
		pwr->gpu_reg = NULL;
	}

	if (pwr->gpu_cx) {
		regulator_put(pwr->gpu_cx);
		pwr->gpu_cx = NULL;
	}

	/* Slot 0 may alias slot 1 (see init), so only put indices >= 1
	 * and just NULL out slot 0 afterwards */
	for (i = 1; i < KGSL_MAX_CLKS; i++)
		if (pwr->grp_clks[i]) {
			clk_put(pwr->grp_clks[i]);
			pwr->grp_clks[i] = NULL;
		}

	pwr->grp_clks[0] = NULL;
	pwr->power_flags = 0;
}
720
/*
 * Deferred-work idle handler: runs after the idle timer fires and tries
 * to drop the GPU into a lower power state.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		kgsl_pwrscale_idle(device, 0);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Could not sleep; rearm the idle timer */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/* If the GPU has been too busy to sleep, make sure *
			 * that is acurately reflected in the % busy numbers. */
			device->pwrctrl.clk_stats.no_nap_cnt++;
			if (device->pwrctrl.clk_stats.no_nap_cnt >
							UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.clk_stats.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Do not power down while hung or recovering */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
753
/*
 * Idle timer callback: request SLUMBER or SLEEP and punt the actual
 * transition to the workqueue (timer context cannot sleep).
 */
void kgsl_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
	if (device->requested_state != KGSL_STATE_SUSPEND) {
		if (device->pwrctrl.restore_slumber ||
			device->pwrctrl.strtstp_sleepwake)
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
		else
			kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		/* Have work run in a non-interrupt context. */
		queue_work(device->work_queue, &device->idle_check_ws);
	}
}
769
/*
 * Ensure the hardware is awake and clocked before touching registers.
 * Caller must hold device->mutex.
 */
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		return;
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		kgsl_pwrctrl_wake(device);
		break;
	case KGSL_STATE_SUSPEND:
		kgsl_check_suspended(device);
		break;
	case KGSL_STATE_INIT:
	case KGSL_STATE_HUNG:
	case KGSL_STATE_DUMP_AND_RECOVER:
		/* Tolerated only if the clocks are already running */
		if (test_bit(KGSL_PWRFLAGS_CLK_ON,
					 &device->pwrctrl.power_flags))
			break;
		else
			KGSL_PWR_ERR(device,
					"hw access while clocks off from state %d\n",
					device->state);
		break;
	default:
		KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
					device->state);
		break;
	}
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
802
803void kgsl_check_suspended(struct kgsl_device *device)
804{
805 if (device->requested_state == KGSL_STATE_SUSPEND ||
806 device->state == KGSL_STATE_SUSPEND) {
807 mutex_unlock(&device->mutex);
808 wait_for_completion(&device->hwaccess_gate);
809 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -0700810 } else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700811 mutex_unlock(&device->mutex);
812 wait_for_completion(&device->recovery_gate);
813 mutex_lock(&device->mutex);
Suman Tatiraju24569022011-10-27 11:11:12 -0700814 } else if (device->state == KGSL_STATE_SLUMBER)
815 kgsl_pwrctrl_wake(device);
816}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700817
/*
 * Transition the device into NAP: irqs and core clocks off for a quick,
 * shallow idle.  Returns -EBUSY (and clears the requested state) if the
 * GPU is not idle yet; 0 otherwise.  Caller must hold the device mutex.
 */
static int
_nap(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
		/* fall through - already at or below NAP, nothing to do */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		break;
	default:
		/* No legal transition from here; cancel the request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
		break;
	}
	return 0;
}
840
/*
 * Reset busy-time bookkeeping when entering a sleep state so idle time
 * is not counted toward the GPU busy statistics, and notify pwrscale.
 */
static void
_sleep_accounting(struct kgsl_device *device)
{
	kgsl_pwrctrl_busy_time(device, false);
	device->pwrctrl.clk_stats.start = ktime_set(0, 0);
	device->pwrctrl.time = 0;
	kgsl_pwrscale_sleep(device);
}
849
/*
 * Transition the device into SLEEP: irqs, AXI, and core clocks off,
 * core clock rate dropped to the lowest power level, and the dma
 * pm_qos request relaxed.  Returns -EBUSY if the GPU is still busy;
 * 0 otherwise.  Caller must hold the device mutex.
 */
static int
_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
		/* Drop to the minimum frequency (last power level). */
		if (pwr->pwrlevels[0].gpu_freq > 0)
			clk_set_rate(pwr->grp_clks[0],
				pwr->pwrlevels[pwr->num_pwrlevels - 1].
				gpu_freq);
		_sleep_accounting(device);
		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLEEP:
	case KGSL_STATE_SLUMBER:
		/* Already at or below SLEEP. */
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
884
/*
 * Transition the device into SLUMBER, the deepest idle state: stop the
 * idle timer, suspend the current context, stop the device, and relax
 * the dma pm_qos request.  Returns -EBUSY if the GPU is still busy;
 * 0 otherwise.  Caller must hold the device mutex.
 */
static int
_slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		pm_qos_update_request(&device->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);
		break;
	case KGSL_STATE_SLUMBER:
		/* Already there. */
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
				kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700914
915/******************************************************************/
916/* Caller must hold the device mutex. */
917int kgsl_pwrctrl_sleep(struct kgsl_device *device)
918{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700919 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
921
922 /* Work through the legal state transitions */
Jeremy Gebben388c2972011-12-16 09:05:07 -0700923 switch (device->requested_state) {
924 case KGSL_STATE_NAP:
Jeremy Gebben388c2972011-12-16 09:05:07 -0700925 status = _nap(device);
926 break;
927 case KGSL_STATE_SLEEP:
Lucille Sylvestera985adf2012-01-16 11:11:55 -0700928 status = _sleep(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700929 break;
930 case KGSL_STATE_SLUMBER:
931 status = _slumber(device);
932 break;
933 default:
934 KGSL_PWR_INFO(device, "bad state request 0x%x\n",
935 device->requested_state);
936 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
937 status = -EINVAL;
938 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939 }
Suman Tatiraju24569022011-10-27 11:11:12 -0700940 return status;
941}
Jeremy Gebben388c2972011-12-16 09:05:07 -0700942EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
Suman Tatiraju24569022011-10-27 11:11:12 -0700943
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700944/******************************************************************/
945/* Caller must hold the device mutex. */
946void kgsl_pwrctrl_wake(struct kgsl_device *device)
947{
Jeremy Gebben388c2972011-12-16 09:05:07 -0700948 int status;
949 kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
950 switch (device->state) {
951 case KGSL_STATE_SLUMBER:
952 status = device->ftbl->start(device, 0);
953 if (status) {
954 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
955 KGSL_DRV_ERR(device, "start failed %d\n", status);
956 break;
957 }
958 /* fall through */
959 case KGSL_STATE_SLEEP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960 kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
961 kgsl_pwrscale_wake(device);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700962 /* fall through */
963 case KGSL_STATE_NAP:
964 /* Turn on the core clocks */
Lucille Sylvestere4a7c1a2012-04-11 12:17:38 -0600965 kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700966 /* Enable state before turning on irq */
967 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
968 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
969 /* Re-enable HW access */
970 mod_timer(&device->idle_timer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 jiffies + device->pwrctrl.interval_timeout);
Devin Kim66ad4c02012-09-21 20:28:50 -0700972 pm_qos_update_request(&device->pm_qos_req_dma,
973 GPU_SWFI_LATENCY);
Jeremy Gebben388c2972011-12-16 09:05:07 -0700974 case KGSL_STATE_ACTIVE:
975 break;
976 default:
Jeremy Gebbenb50f3312011-12-16 08:58:33 -0700977 KGSL_PWR_WARN(device, "unhandled state %s\n",
978 kgsl_pwrstate_to_str(device->state));
Jeremy Gebben388c2972011-12-16 09:05:07 -0700979 kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
980 break;
981 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700982}
983EXPORT_SYMBOL(kgsl_pwrctrl_wake);
984
/*
 * Power the device up: rail first, then core clocks, then the AXI bus.
 * The sequence is intentional and must not be reordered.
 */
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
993
/*
 * Power the device down: AXI bus first, then core clocks, then the
 * rail — the exact reverse of kgsl_pwrctrl_enable().
 */
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001002
/*
 * Commit a power-state transition: trace it, record the new state,
 * and clear any pending request.
 */
void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
{
	trace_kgsl_pwr_set_state(device, state);
	device->state = state;
	device->requested_state = KGSL_STATE_NONE;
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
1010
1011void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
1012{
1013 if (state != KGSL_STATE_NONE && state != device->requested_state)
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001014 trace_kgsl_pwr_request_state(device, state);
Jeremy Gebben388c2972011-12-16 09:05:07 -07001015 device->requested_state = state;
1016}
1017EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
Jeremy Gebbenb50f3312011-12-16 08:58:33 -07001018
1019const char *kgsl_pwrstate_to_str(unsigned int state)
1020{
1021 switch (state) {
1022 case KGSL_STATE_NONE:
1023 return "NONE";
1024 case KGSL_STATE_INIT:
1025 return "INIT";
1026 case KGSL_STATE_ACTIVE:
1027 return "ACTIVE";
1028 case KGSL_STATE_NAP:
1029 return "NAP";
1030 case KGSL_STATE_SLEEP:
1031 return "SLEEP";
1032 case KGSL_STATE_SUSPEND:
1033 return "SUSPEND";
1034 case KGSL_STATE_HUNG:
1035 return "HUNG";
1036 case KGSL_STATE_DUMP_AND_RECOVER:
1037 return "DNR";
1038 case KGSL_STATE_SLUMBER:
1039 return "SLUMBER";
1040 default:
1041 break;
1042 }
1043 return "UNKNOWN";
1044}
1045EXPORT_SYMBOL(kgsl_pwrstate_to_str);
1046