blob: 586d97f89e0224ef5354bd1aa32b5d3e67fb632f [file] [log] [blame]
Pavel Machekd480ace2009-09-22 16:47:03 -07001/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
Pavel Machekd480ace2009-09-22 16:47:03 -07006 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/module.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070019#include <linux/kernel.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/sched.h>
21#include <linux/time.h>
22#include <linux/init.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070023#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <linux/spinlock.h>
25#include <linux/hrtimer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070026#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/hardware.h>
28#include <linux/io.h>
29#include <linux/debugfs.h>
30#include <linux/delay.h>
31#include <linux/mutex.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <asm/system.h>
36#include <asm/mach-types.h>
37#include <linux/semaphore.h>
38#include <linux/uaccess.h>
39#include <mach/clk.h>
40#include "mdp.h"
41#include "msm_fb.h"
42#ifdef CONFIG_FB_MSM_MDP40
43#include "mdp4.h"
44#endif
45#include "mipi_dsi.h"
Pavel Machekd480ace2009-09-22 16:47:03 -070046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047uint32 mdp4_extn_disp;
Pavel Machekd480ace2009-09-22 16:47:03 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049static struct clk *mdp_clk;
50static struct clk *mdp_pclk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051static struct clk *mdp_lut_clk;
52int mdp_rev;
Pavel Machekd480ace2009-09-22 16:47:03 -070053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054struct regulator *footswitch;
Pavel Machekd480ace2009-09-22 16:47:03 -070055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056struct completion mdp_ppp_comp;
57struct semaphore mdp_ppp_mutex;
58struct semaphore mdp_pipe_ctrl_mutex;
Pavel Machekd480ace2009-09-22 16:47:03 -070059
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070060unsigned long mdp_timer_duration = (HZ/20); /* 50 msecond */
Pavel Machekd480ace2009-09-22 16:47:03 -070061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062boolean mdp_ppp_waiting = FALSE;
63uint32 mdp_tv_underflow_cnt;
64uint32 mdp_lcdc_underflow_cnt;
65
66boolean mdp_current_clk_on = FALSE;
67boolean mdp_is_in_isr = FALSE;
68
69/*
70 * legacy mdp_in_processing is only for DMA2-MDDI
71 * this applies to DMA2 block only
72 */
73uint32 mdp_in_processing = FALSE;
74
75#ifdef CONFIG_FB_MSM_MDP40
76uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
77#else
78uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
79#endif
80
81MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
82
83atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];
84
85spinlock_t mdp_spin_lock;
86struct workqueue_struct *mdp_dma_wq; /*mdp dma wq */
87struct workqueue_struct *mdp_vsync_wq; /*mdp vsync wq */
88
89static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
90static struct delayed_work mdp_pipe_ctrl_worker;
91
92static boolean mdp_suspended = FALSE;
93DEFINE_MUTEX(mdp_suspend_mutex);
94
95#ifdef CONFIG_FB_MSM_MDP40
96struct mdp_dma_data dma2_data;
97struct mdp_dma_data dma_s_data;
98struct mdp_dma_data dma_e_data;
99ulong mdp4_display_intf;
100#else
101static struct mdp_dma_data dma2_data;
102static struct mdp_dma_data dma_s_data;
103#ifndef CONFIG_FB_MSM_MDP303
104static struct mdp_dma_data dma_e_data;
105#endif
106#endif
107static struct mdp_dma_data dma3_data;
108
109extern ktime_t mdp_dma2_last_update_time;
110
111extern uint32 mdp_dma2_update_time_in_usec;
112extern int mdp_lcd_rd_cnt_offset_slow;
113extern int mdp_lcd_rd_cnt_offset_fast;
114extern int mdp_usec_diff_threshold;
115
116#ifdef CONFIG_FB_MSM_LCDC
117extern int first_pixel_start_x;
118extern int first_pixel_start_y;
119#endif
120
121#ifdef MSM_FB_ENABLE_DBGFS
122struct dentry *mdp_dir;
123#endif
124
125#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
126static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
127#else
128#define mdp_suspend NULL
129#endif
130
131struct timeval mdp_dma2_timeval;
132struct timeval mdp_ppp_timeval;
133
134#ifdef CONFIG_HAS_EARLYSUSPEND
135static struct early_suspend early_suspend;
136#endif
137
138static u32 mdp_irq;
139
140static uint32 mdp_prim_panel_type = NO_PANEL;
141#ifndef CONFIG_FB_MSM_MDP22
142DEFINE_MUTEX(mdp_lut_push_sem);
143static int mdp_lut_i;
144static int mdp_lut_hw_update(struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700145{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 int i;
147 u16 *c[3];
148 u16 r, g, b;
Pavel Machekd480ace2009-09-22 16:47:03 -0700149
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700150 c[0] = cmap->green;
151 c[1] = cmap->blue;
152 c[2] = cmap->red;
Pavel Machekd480ace2009-09-22 16:47:03 -0700153
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700154 for (i = 0; i < cmap->len; i++) {
155 if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
156 copy_from_user(&g, cmap->green++, sizeof(g)) ||
157 copy_from_user(&b, cmap->blue++, sizeof(b)))
158 return -EFAULT;
159
160#ifdef CONFIG_FB_MSM_MDP40
161 MDP_OUTP(MDP_BASE + 0x94800 +
162#else
163 MDP_OUTP(MDP_BASE + 0x93800 +
164#endif
165 (0x400*mdp_lut_i) + cmap->start*4 + i*4,
166 ((g & 0xff) |
167 ((b & 0xff) << 8) |
168 ((r & 0xff) << 16)));
Pavel Machekd480ace2009-09-22 16:47:03 -0700169 }
170
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700172}
173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174static int mdp_lut_push;
175static int mdp_lut_push_i;
176static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 int ret;
179
180 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
181 ret = mdp_lut_hw_update(cmap);
182 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
183
184 if (ret)
185 return ret;
186
187 mutex_lock(&mdp_lut_push_sem);
188 mdp_lut_push = 1;
189 mdp_lut_push_i = mdp_lut_i;
190 mutex_unlock(&mdp_lut_push_sem);
191
192 mdp_lut_i = (mdp_lut_i + 1)%2;
193
194 return 0;
195}
196
197static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
198{
199 int ret;
200
201 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
202 ret = mdp_lut_hw_update(cmap);
203
204 if (ret) {
205 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
206 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -0700207 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208
209 MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
210 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
211 mdp_lut_i = (mdp_lut_i + 1)%2;
212
213 return 0;
214}
215
216static void mdp_lut_enable(void)
217{
218 if (mdp_lut_push) {
219 mutex_lock(&mdp_lut_push_sem);
220 mdp_lut_push = 0;
221 MDP_OUTP(MDP_BASE + 0x90070,
222 (mdp_lut_push_i << 10) | 0x17);
223 mutex_unlock(&mdp_lut_push_sem);
224 }
225}
226
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700227#define MDP_REV42_HIST_MAX_BIN 128
228#define MDP_REV41_HIST_MAX_BIN 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229
230#ifdef CONFIG_FB_MSM_MDP40
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700231unsigned int mdp_hist_frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232struct completion mdp_hist_comp;
233boolean mdp_is_hist_start = FALSE;
234#else
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700235static unsigned int mdp_hist_frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236static struct completion mdp_hist_comp;
237static boolean mdp_is_hist_start = FALSE;
238#endif
239static DEFINE_MUTEX(mdp_hist_mutex);
240
/*
 * mdp_histogram_ctrl() - re-arm (en == TRUE) or quiesce (en == FALSE)
 * histogram collection around a panel power transition.
 *
 * Acts only when a client has started collection (mdp_is_hist_start);
 * the flag itself is left untouched so collection re-arms on the next
 * panel-on.  Always returns 0.
 */
int mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	boolean hist_start;

	/* snapshot the start flag; it is written under mdp_spin_lock */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	hist_start = mdp_is_hist_start;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (hist_start == TRUE) {
		if (en == TRUE) {
			mdp_enable_irq(MDP_HISTOGRAM_TERM);
			mdp_hist_frame_cnt = 1;
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			/* program frame count and the start bit; register
			   base differs by MDP revision.  The extra MDP4
			   writes presumably unmask HIST_DONE — confirm
			   against the register spec. */
#ifdef CONFIG_FB_MSM_MDP40
			MDP_OUTP(MDP_BASE + 0x95010, 1);
			MDP_OUTP(MDP_BASE + 0x9501c, INTR_HIST_DONE);
			MDP_OUTP(MDP_BASE + 0x95004, 1);
			MDP_OUTP(MDP_BASE + 0x95000, 1);
#else
			MDP_OUTP(MDP_BASE + 0x94004, 1);
			MDP_OUTP(MDP_BASE + 0x94000, 1);
#endif
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
				 FALSE);
		} else
			mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}
	return 0;
}
270
/*
 * mdp_start_histogram() - begin histogram collection for a client.
 *
 * Takes the histogram IRQ reference and programs the hardware to
 * collect one frame (frame-count register, then the start bit).
 * Returns 0 on success, -EPERM if collection is already running.
 */
int mdp_start_histogram(struct fb_info *info)
{
	unsigned long flag;

	int ret = 0;
	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_start == TRUE) {
		printk(KERN_ERR "%s histogram already started\n", __func__);
		ret = -EPERM;
		goto mdp_hist_start_err;
	}

	/* mark collection active; the ISR reads this flag */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_is_hist_start = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_HISTOGRAM_TERM);
	mdp_hist_frame_cnt = 1;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/* frame count, then start; register base depends on MDP rev */
#ifdef CONFIG_FB_MSM_MDP40
	MDP_OUTP(MDP_BASE + 0x95004, 1);
	MDP_OUTP(MDP_BASE + 0x95000, 1);
#else
	MDP_OUTP(MDP_BASE + 0x94004, 1);
	MDP_OUTP(MDP_BASE + 0x94000, 1);
#endif
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

mdp_hist_start_err:
	mutex_unlock(&mdp_hist_mutex);
	return ret;

}
303int mdp_stop_histogram(struct fb_info *info)
304{
305 unsigned long flag;
306 int ret = 0;
307 mutex_lock(&mdp_hist_mutex);
308 if (!mdp_is_hist_start) {
309 printk(KERN_ERR "%s histogram already stopped\n", __func__);
310 ret = -EPERM;
311 goto mdp_hist_stop_err;
312 }
313 spin_lock_irqsave(&mdp_spin_lock, flag);
314 mdp_is_hist_start = FALSE;
315 spin_unlock_irqrestore(&mdp_spin_lock, flag);
316 /* disable the irq for histogram since we handled it
317 when the control reaches here */
318 mdp_disable_irq(MDP_HISTOGRAM_TERM);
319
320mdp_hist_stop_err:
321 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700322 return ret;
323}
324
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700325static int mdp_copy_hist_data(struct mdp_histogram *hist)
Pavel Machekd480ace2009-09-22 16:47:03 -0700326{
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700327 char *mdp_hist_base;
328 uint32 r_data_offset = 0x100, g_data_offset = 0x200;
329 uint32 b_data_offset = 0x300;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330 int ret = 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700331
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700332 mutex_lock(&mdp_hist_mutex);
333 if (mdp_rev >= MDP_REV_42) {
334 mdp_hist_base = MDP_BASE + 0x95000;
335 r_data_offset = 0x400;
336 g_data_offset = 0x800;
337 b_data_offset = 0xc00;
338 } else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_41) {
339 mdp_hist_base = MDP_BASE + 0x95000;
340 } else if (mdp_rev >= MDP_REV_30 && mdp_rev <= MDP_REV_31) {
341 mdp_hist_base = MDP_BASE + 0x94000;
342 } else {
343 pr_err("%s(): Unsupported MDP rev %u\n", __func__, mdp_rev);
344 return -EPERM;
345 }
346
347 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
348 if (hist->r) {
349 ret = copy_to_user(hist->r, mdp_hist_base + r_data_offset,
350 hist->bin_cnt * 4);
351 if (ret)
352 goto hist_err;
353 }
354 if (hist->g) {
355 ret = copy_to_user(hist->g, mdp_hist_base + g_data_offset,
356 hist->bin_cnt * 4);
357 if (ret)
358 goto hist_err;
359 }
360 if (hist->b) {
361 ret = copy_to_user(hist->b, mdp_hist_base + b_data_offset,
362 hist->bin_cnt * 4);
363 if (ret)
364 goto hist_err;
365 }
366
367 if (mdp_is_hist_start == TRUE) {
368 MDP_OUTP(mdp_hist_base + 0x004,
369 mdp_hist_frame_cnt);
370 MDP_OUTP(mdp_hist_base, 1);
371 }
372 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
373 mutex_unlock(&mdp_hist_mutex);
374 return 0;
375
376hist_err:
377 printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
378 return ret;
379}
380
381static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
382{
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700383 if (!hist->frame_cnt || (hist->bin_cnt == 0))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384 return -EINVAL;
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700385
386 if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
387 || (mdp_rev == MDP_REV_42 &&
388 hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
389 return -EINVAL;
390
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700391 mutex_lock(&mdp_hist_mutex);
392 if (!mdp_is_hist_start) {
393 printk(KERN_ERR "%s histogram not started\n", __func__);
394 mutex_unlock(&mdp_hist_mutex);
395 return -EPERM;
396 }
397 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399 INIT_COMPLETION(mdp_hist_comp);
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700400 mdp_hist_frame_cnt = hist->frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700401 wait_for_completion_killable(&mdp_hist_comp);
402
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700403 return mdp_copy_hist_data(hist);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700404}
405#endif
406
407/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
408
409int mdp_ppp_pipe_wait(void)
410{
411 int ret = 1;
412
413 /* wait 5 seconds for the operation to complete before declaring
414 the MDP hung */
415
416 if (mdp_ppp_waiting == TRUE) {
417 ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
418 5 * HZ);
419
420 if (!ret)
421 printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
422 __func__);
Pavel Machekd480ace2009-09-22 16:47:03 -0700423 }
424
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425 return ret;
426}
Pavel Machekd480ace2009-09-22 16:47:03 -0700427
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700428static DEFINE_SPINLOCK(mdp_lock);
429static int mdp_irq_mask;
430static int mdp_irq_enabled;
Pavel Machekd480ace2009-09-22 16:47:03 -0700431
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432/*
433 * mdp_enable_irq: can not be called from isr
434 */
435void mdp_enable_irq(uint32 term)
436{
437 unsigned long irq_flags;
438
439 spin_lock_irqsave(&mdp_lock, irq_flags);
440 if (mdp_irq_mask & term) {
441 printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
442 __func__, term, mdp_irq_mask, mdp_irq_enabled);
443 } else {
444 mdp_irq_mask |= term;
445 if (mdp_irq_mask && !mdp_irq_enabled) {
446 mdp_irq_enabled = 1;
447 enable_irq(mdp_irq);
448 }
449 }
Pavel Machekd480ace2009-09-22 16:47:03 -0700450 spin_unlock_irqrestore(&mdp_lock, irq_flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451}
452
453/*
454 * mdp_disable_irq: can not be called from isr
455 */
456void mdp_disable_irq(uint32 term)
457{
458 unsigned long irq_flags;
459
460 spin_lock_irqsave(&mdp_lock, irq_flags);
461 if (!(mdp_irq_mask & term)) {
462 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
463 __func__, term, mdp_irq_mask, mdp_irq_enabled);
464 } else {
465 mdp_irq_mask &= ~term;
466 if (!mdp_irq_mask && mdp_irq_enabled) {
467 mdp_irq_enabled = 0;
468 disable_irq(mdp_irq);
469 }
470 }
471 spin_unlock_irqrestore(&mdp_lock, irq_flags);
472}
473
474void mdp_disable_irq_nosync(uint32 term)
475{
476 spin_lock(&mdp_lock);
477 if (!(mdp_irq_mask & term)) {
478 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
479 __func__, term, mdp_irq_mask, mdp_irq_enabled);
480 } else {
481 mdp_irq_mask &= ~term;
482 if (!mdp_irq_mask && mdp_irq_enabled) {
483 mdp_irq_enabled = 0;
484 disable_irq_nosync(mdp_irq);
485 }
486 }
487 spin_unlock(&mdp_lock);
488}
489
/*
 * mdp_pipe_kickoff() - start the hardware engine selected by @term.
 *
 * MDP_PPP_TERM is synchronous: power the PPP block, arm the completion,
 * write the go-bit at offset 0x30 and block until mdp_isr signals
 * MDP_PPP_DONE (or the wait is killed).  All other terms just write the
 * engine's start register and return; completion is handled by the
 * corresponding done interrupt.  @mfd is unused here.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
		/* the DMA2 start register moved between MDP revisions */
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* activate any LUT bank staged by mdp_lut_update_nonlcdc() */
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		/* MDP303 command-mode panels need a DSI software trigger */
		mipi_dsi_cmd_mdp_sw_trigger();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
577static int mdp_clk_rate;
578static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
579static int pdev_list_cnt;
580
/* Delayed work scheduled by mdp_pipe_ctrl(): drops the master power
 * reference, which turns the MDP clocks off once all blocks are idle. */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp_pipe_ctrl() - per-block power reference counting plus lazy MDP
 * clock gating.
 *
 * @block: which MDP engine the reference is for (or MDP_MASTER_BLOCK).
 * @state: MDP_BLOCK_POWER_ON takes a reference, _OFF drops one.
 * @isr:   TRUE when called from interrupt context; clock shutdown is
 *         then deferred to mdp_pipe_ctrl_wq because the clk/regulator
 *         calls below may sleep.
 *
 * Clocks change state only at the aggregate edges: scheduled off when
 * every block's count reaches zero, turned on when the first reference
 * appears while clocks are off.
 */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			 * Master has to serve a request to power off MDP always
			 * It also has a timer to power off. So, in case of
			 * timer expires first and DMA2 finishes later,
			 * master has to power off two times
			 * There shouldn't be multiple power-off request for
			 * other blocks
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run. queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			/* only the master (or a suspend in progress) may
			   actually gate the clocks; other blocks re-arm
			   the delayed power-off instead */
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					/* drop old MDP4 parts to a safe rate
					   while gated; restored on enable */
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
				if (footswitch != NULL)
					regulator_disable(footswitch);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			if (footswitch != NULL)
				regulator_enable(footswitch);
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				/* restore the rate saved at power-off */
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
750
751#ifndef CONFIG_FB_MSM_MDP40
752irqreturn_t mdp_isr(int irq, void *ptr)
753{
754 uint32 mdp_interrupt = 0;
755 struct mdp_dma_data *dma;
756
757 mdp_is_in_isr = TRUE;
758 do {
759 mdp_interrupt = inp32(MDP_INTR_STATUS);
760 outp32(MDP_INTR_CLEAR, mdp_interrupt);
761
762 mdp_interrupt &= mdp_intr_mask;
763
764 if (mdp_interrupt & TV_ENC_UNDERRUN) {
765 mdp_interrupt &= ~(TV_ENC_UNDERRUN);
766 mdp_tv_underflow_cnt++;
767 }
768
769 if (!mdp_interrupt)
770 break;
771
772 /* DMA3 TV-Out Start */
773 if (mdp_interrupt & TV_OUT_DMA3_START) {
774 /* let's disable TV out interrupt */
775 mdp_intr_mask &= ~TV_OUT_DMA3_START;
776 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
777
778 dma = &dma3_data;
779 if (dma->waiting) {
780 dma->waiting = FALSE;
781 complete(&dma->comp);
782 }
783 }
784#ifndef CONFIG_FB_MSM_MDP22
785 if (mdp_interrupt & MDP_HIST_DONE) {
786 outp32(MDP_BASE + 0x94018, 0x3);
787 outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700788 complete(&mdp_hist_comp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700789 }
790
791 /* LCDC UnderFlow */
792 if (mdp_interrupt & LCDC_UNDERFLOW) {
793 mdp_lcdc_underflow_cnt++;
794 /*when underflow happens HW resets all the histogram
795 registers that were set before so restore them back
796 to normal.*/
797 MDP_OUTP(MDP_BASE + 0x94010, 1);
798 MDP_OUTP(MDP_BASE + 0x9401c, 2);
799 if (mdp_is_hist_start == TRUE) {
800 MDP_OUTP(MDP_BASE + 0x94004,
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700801 mdp_hist_frame_cnt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700802 MDP_OUTP(MDP_BASE + 0x94000, 1);
803 }
804 }
805 /* LCDC Frame Start */
806 if (mdp_interrupt & LCDC_FRAME_START) {
807 /* let's disable LCDC interrupt */
808 mdp_intr_mask &= ~LCDC_FRAME_START;
809 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
810
811 dma = &dma2_data;
812 if (dma->waiting) {
813 dma->waiting = FALSE;
814 complete(&dma->comp);
815 }
816 }
817
818 /* DMA2 LCD-Out Complete */
819 if (mdp_interrupt & MDP_DMA_S_DONE) {
820 dma = &dma_s_data;
821 dma->busy = FALSE;
822 mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
823 TRUE);
824 complete(&dma->comp);
825 }
826 /* DMA_E LCD-Out Complete */
827 if (mdp_interrupt & MDP_DMA_E_DONE) {
828 dma = &dma_s_data;
829 dma->busy = FALSE;
830 mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
831 TRUE);
832 complete(&dma->comp);
833 }
834
835#endif
836
837 /* DMA2 LCD-Out Complete */
838 if (mdp_interrupt & MDP_DMA_P_DONE) {
839 struct timeval now;
840
841 mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
842 mdp_dma2_last_update_time);
843 if (mdp_debug[MDP_DMA2_BLOCK]) {
844 jiffies_to_timeval(jiffies, &now);
845 mdp_dma2_timeval.tv_usec =
846 now.tv_usec - mdp_dma2_timeval.tv_usec;
847 }
848#ifndef CONFIG_FB_MSM_MDP303
849 dma = &dma2_data;
850 dma->busy = FALSE;
851 mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
852 TRUE);
853 complete(&dma->comp);
854#else
855 if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
856 dma = &dma2_data;
857 dma->busy = FALSE;
858 mdp_pipe_ctrl(MDP_DMA2_BLOCK,
859 MDP_BLOCK_POWER_OFF, TRUE);
860 complete(&dma->comp);
861 }
862#endif
863 }
864 /* PPP Complete */
865 if (mdp_interrupt & MDP_PPP_DONE) {
866#ifdef CONFIG_FB_MSM_MDP31
867 MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
868#endif
869 mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
870 if (mdp_ppp_waiting) {
871 mdp_ppp_waiting = FALSE;
872 complete(&mdp_ppp_comp);
873 }
874 }
875 } while (1);
876
877 mdp_is_in_isr = FALSE;
878
Pavel Machekd480ace2009-09-22 16:47:03 -0700879 return IRQ_HANDLED;
880}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700882
/*
 * mdp_drv_init() - one-time initialization of driver-global state:
 * debug flags, locks, workqueues, per-DMA-channel bookkeeping,
 * power-block reference counters and (optionally) debugfs entries.
 * Does not touch hardware.
 */
static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	sema_init(&mdp_ppp_mutex, 1);
	sema_init(&mdp_pipe_ctrl_mutex, 1);

	/* primary DMA channel (DMA2 / DMA_P) */
	dma2_data.busy = FALSE;
	dma2_data.dmap_busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_completion(&dma2_data.dmap_comp);
	sema_init(&dma2_data.mutex, 1);
	mutex_init(&dma2_data.ov_mutex);

	/* DMA3 (TV-out) channel */
	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	sema_init(&dma3_data.mutex, 1);

	/* secondary DMA channel */
	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	sema_init(&dma_s_data.mutex, 1);

	/* external DMA channel; not built for MDP303 */
#ifndef CONFIG_FB_MSM_MDP303
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	mutex_init(&dma_e_data.ov_mutex);
#endif

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		atomic_set(&mdp_block_power_cnt[i], 0);
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
#endif
			}
		}
	}
#endif
}
976
/* Forward declarations; both are registered in mdp_driver below. */
static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);
979
/*
 * Runtime-PM suspend hook.  Intentionally a no-op apart from the debug
 * trace; MDP power/clock state is managed elsewhere in this driver.
 */
static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
985
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986static int mdp_runtime_resume(struct device *dev)
Pavel Machekd480ace2009-09-22 16:47:03 -0700987{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988 dev_dbg(dev, "pm_runtime: resuming...\n");
Pavel Machekd480ace2009-09-22 16:47:03 -0700989 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700990}
991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700992static struct dev_pm_ops mdp_dev_pm_ops = {
993 .runtime_suspend = mdp_runtime_suspend,
994 .runtime_resume = mdp_runtime_resume,
Pavel Machekd480ace2009-09-22 16:47:03 -0700995};
996
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700997
998static struct platform_driver mdp_driver = {
999 .probe = mdp_probe,
1000 .remove = mdp_remove,
1001#ifndef CONFIG_HAS_EARLYSUSPEND
1002 .suspend = mdp_suspend,
1003 .resume = NULL,
1004#endif
1005 .shutdown = NULL,
1006 .driver = {
1007 /*
1008 * Driver name must match the device name added in
1009 * platform.c.
1010 */
1011 .name = "mdp",
1012 .pm = &mdp_dev_pm_ops,
1013 },
1014};
1015
1016static int mdp_off(struct platform_device *pdev)
Pavel Machekd480ace2009-09-22 16:47:03 -07001017{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001018 int ret = 0;
1019 mdp_histogram_ctrl(FALSE);
1020
1021 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1022 ret = panel_next_off(pdev);
1023 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1024
1025 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -07001026}
1027
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001028static int mdp_on(struct platform_device *pdev)
1029{
1030 int ret = 0;
1031#ifdef CONFIG_FB_MSM_MDP40
1032 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1033 if (is_mdp4_hw_reset()) {
1034 mdp4_hw_init();
1035 outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
1036 }
1037 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1038#endif
1039 mdp_histogram_ctrl(TRUE);
1040
1041 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1042 ret = panel_next_on(pdev);
1043 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1044 return ret;
1045}
1046
/* Nonzero once the resource pdev (id 0) has mapped registers and set up
 * the IRQ and clocks; panel pdevs probed before that are rejected. */
static int mdp_resource_initialized;
/* Board platform data, cached from the resource pdev in mdp_probe(). */
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;
1051
1052/*
1053 * mdp_hw_revision:
1054 * 0 == V1
1055 * 1 == V2
1056 * 2 == V2.1
1057 *
1058 */
/*
 * Latch the SoC hardware revision into the global mdp_hw_revision by
 * reading bits 31:28 of the word at mdp_pdata->hw_revision_addr.
 * Returns silently (leaving mdp_hw_revision untouched or at
 * MDP4_REVISION_NONE) when platform data, the revision address, or the
 * mapping is unavailable.
 */
void mdp_hw_version(void)
{
	char *cp;
	uint32 *hp;

	if (mdp_pdata == NULL)
		return;

	mdp_hw_revision = MDP4_REVISION_NONE;
	if (mdp_pdata->hw_revision_addr == 0)
		return;

	/* tlmmgpio2 shadow */
	cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);

	if (cp == NULL)
		return;

	hp = (uint32 *)cp;	/* HW_REVISION_NUMBER */
	/* NOTE(review): plain dereference of ioremap()ed memory; readl()
	 * is the canonical accessor -- confirm this address is a plain
	 * shadow-RAM location before relying on this. */
	mdp_hw_revision = *hp;
	iounmap(cp);

	mdp_hw_revision >>= 28;	/* bit 31:28 */
	mdp_hw_revision &= 0x0f;

	MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
		__func__, mdp_hw_revision);
}
1087
1088#ifdef CONFIG_FB_MSM_MDP40
1089static void configure_mdp_core_clk_table(uint32 min_clk_rate)
1090{
1091 uint8 count;
1092 uint32 current_rate;
1093 if (mdp_clk && mdp_pdata
1094 && mdp_pdata->mdp_core_clk_table) {
1095 if (clk_set_min_rate(mdp_clk,
1096 min_clk_rate) < 0)
1097 printk(KERN_ERR "%s: clk_set_min_rate failed\n",
1098 __func__);
1099 else {
1100 count = 0;
1101 current_rate = clk_get_rate(mdp_clk);
1102 while (count < mdp_pdata->num_mdp_clk) {
1103 if (mdp_pdata->mdp_core_clk_table[count]
1104 < current_rate) {
1105 mdp_pdata->
1106 mdp_core_clk_table[count] =
1107 current_rate;
1108 }
1109 count++;
1110 }
1111 }
1112 }
1113}
1114#endif
1115
1116#ifdef CONFIG_MSM_BUS_SCALING
1117static uint32_t mdp_bus_scale_handle;
1118int mdp_bus_scale_update_request(uint32_t index)
1119{
1120 if (!mdp_pdata && (!mdp_pdata->mdp_bus_scale_table
1121 || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1))) {
1122 printk(KERN_ERR "%s invalid table or index\n", __func__);
1123 return -EINVAL;
1124 }
1125 if (mdp_bus_scale_handle < 1) {
1126 printk(KERN_ERR "%s invalid bus handle\n", __func__);
1127 return -EINVAL;
1128 }
1129 return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
1130 index);
1131}
1132#endif
/* Serializes rate changes and queries on mdp_clk / mdp_lut_clk. */
DEFINE_MUTEX(mdp_clk_lock);
1134int mdp_set_core_clk(uint16 perf_level)
1135{
1136 int ret = -EINVAL;
1137 if (mdp_clk && mdp_pdata
1138 && mdp_pdata->mdp_core_clk_table) {
1139 if (perf_level > mdp_pdata->num_mdp_clk)
1140 printk(KERN_ERR "%s invalid perf level\n", __func__);
1141 else {
1142 mutex_lock(&mdp_clk_lock);
1143 if (mdp4_extn_disp)
1144 perf_level = 1;
1145 ret = clk_set_rate(mdp_clk,
1146 mdp_pdata->
1147 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1148 - perf_level]);
1149 mutex_unlock(&mdp_clk_lock);
1150 if (ret) {
1151 printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
1152 __func__);
1153 }
1154 }
1155 }
1156 return ret;
1157}
1158
1159unsigned long mdp_get_core_clk(void)
1160{
1161 unsigned long clk_rate = 0;
1162 if (mdp_clk) {
1163 mutex_lock(&mdp_clk_lock);
1164 clk_rate = clk_get_rate(mdp_clk);
1165 mutex_unlock(&mdp_clk_lock);
1166 }
1167
1168 return clk_rate;
1169}
1170
1171unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
1172{
1173 unsigned long clk_rate = 0;
1174
1175 if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
1176 if (perf_level > mdp_pdata->num_mdp_clk) {
1177 printk(KERN_ERR "%s invalid perf level\n", __func__);
1178 clk_rate = mdp_get_core_clk();
1179 } else {
1180 if (mdp4_extn_disp)
1181 perf_level = 1;
1182 clk_rate = mdp_pdata->
1183 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1184 - perf_level];
1185 }
1186 } else
1187 clk_rate = mdp_get_core_clk();
1188
1189 return clk_rate;
1190}
1191
1192static int mdp_irq_clk_setup(void)
1193{
1194 int ret;
1195
1196#ifdef CONFIG_FB_MSM_MDP40
1197 ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
1198#else
1199 ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
1200#endif
1201 if (ret) {
1202 printk(KERN_ERR "mdp request_irq() failed!\n");
1203 return ret;
1204 }
1205 disable_irq(mdp_irq);
1206
1207 footswitch = regulator_get(NULL, "fs_mdp");
1208 if (IS_ERR(footswitch))
1209 footswitch = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001210
1211 mdp_clk = clk_get(NULL, "mdp_clk");
1212 if (IS_ERR(mdp_clk)) {
1213 ret = PTR_ERR(mdp_clk);
1214 printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
1215 free_irq(mdp_irq, 0);
1216 return ret;
1217 }
1218
1219 mdp_pclk = clk_get(NULL, "mdp_pclk");
1220 if (IS_ERR(mdp_pclk))
1221 mdp_pclk = NULL;
1222
1223 if (mdp_rev == MDP_REV_42) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224 mdp_lut_clk = clk_get(NULL, "lut_mdp");
1225 if (IS_ERR(mdp_lut_clk)) {
1226 ret = PTR_ERR(mdp_lut_clk);
1227 pr_err("can't get mdp_clk error:%d!\n", ret);
1228 clk_put(mdp_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001229 free_irq(mdp_irq, 0);
1230 return ret;
1231 }
1232 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001233 mdp_lut_clk = NULL;
1234 }
1235
1236#ifdef CONFIG_FB_MSM_MDP40
1237 /*
1238 * mdp_clk should greater than mdp_pclk always
1239 */
1240 if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
1241 mutex_lock(&mdp_clk_lock);
1242 clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
1243 if (mdp_lut_clk != NULL)
1244 clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
1245 mutex_unlock(&mdp_clk_lock);
1246 }
1247 MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
1248#endif
1249 return 0;
1250}
1251
/*
 * Platform probe for the "mdp" driver.  Called twice per boot:
 *
 *  1. For the resource pdev (id == 0, carries the register window and
 *     IRQ): maps the registers, sets up IRQ/clocks, runs hw init, and
 *     marks the driver initialized.
 *  2. For each panel pdev: builds a child "msm_fb" platform device,
 *     chains the panel on/off callbacks through mdp_on()/mdp_off(),
 *     and wires per-panel-type DMA/update/cursor/histogram hooks
 *     before registering the msm_fb device.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t  size ;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	/* resource pdev: one-time register/IRQ/clock bring-up */
	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size =  resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		if (mdp_irq < 0) {
			/* NOTE(review): returns -ENOMEM instead of the irq
			 * error code, and msm_mdp_base is not unmapped on
			 * this path -- confirm before relying on re-probe */
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
		mdp4_fetch_cfg(clk_get_rate(mdp_clk));
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif

		mdp_resource_initialized = 1;
		return 0;
	}

	/* panel pdevs are rejected until the resource pdev has probed */
	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain: msm_fb calls pdata->on/off, which chain to pdev */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	/* per-panel-type DMA engine, update function, and interface mux */
	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		/* MDP303: single DMA path, DISPLAY_1 only */
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		/* DSI cmd mode needs core clk >= 1.5x the DSI pixel clk */
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		/* LCDC needs core clk >= 1.15x the panel pixel clk */
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
						* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		/* mask off DMA_P done irq for hw-refreshed LCDC */
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	/* snapshot the interface mux so mdp_on() can restore it */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			/* NOTE(review): returns without
			 * platform_device_put(msm_fb_dev) -- leaks the
			 * child device on this path */
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif
	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

      mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}
1582
1583#ifdef CONFIG_PM
/*
 * Common suspend path, shared by the platform suspend callback and the
 * Android early-suspend hook: drain deferred power-down work, wait for
 * in-flight PPP operations, power the MDP blocks down, and mark the
 * driver suspended.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* in case the worker was already running and can't be cancelled,
	 * wait for it to finish */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* busy-wait until all PPP users have dropped their power refs */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
1603#endif
1604
1605#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
1606static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
1607{
1608 if (pdev->id == 0) {
1609 mdp_suspend_sub();
1610 if (mdp_current_clk_on) {
1611 printk(KERN_WARNING"MDP suspend failed\n");
1612 return -EBUSY;
1613 }
1614 }
1615
1616 return 0;
1617}
1618#endif
1619
1620#ifdef CONFIG_HAS_EARLYSUSPEND
/* Android early-suspend hook: run the common MDP suspend path. */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
}
1625
/* Android early-resume hook: clear the suspended flag under the
 * suspend mutex; actual re-init happens via the panel-on path. */
static void mdp_early_resume(struct early_suspend *h)
{
	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
1632#endif
1633
/*
 * Platform remove callback: release the footswitch regulator, unmap
 * the register window mapped in mdp_probe(), disable runtime PM, and
 * unregister the bus-scaling client when one was registered.
 */
static int mdp_remove(struct platform_device *pdev)
{
	if (footswitch != NULL)
		regulator_put(footswitch);
	iounmap(msm_mdp_base);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return 0;
}
1647
/*
 * Register the MDP platform driver.  On Android builds, first hook the
 * early-suspend callbacks one level before the framebuffer's own
 * EARLY_SUSPEND_LEVEL_DISABLE_FB handler.
 */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
1659
1660static int __init mdp_driver_init(void)
1661{
1662 int ret;
1663
1664 mdp_drv_init();
1665
1666 ret = mdp_register_driver();
1667 if (ret) {
1668 printk(KERN_ERR "mdp_register_driver() failed!\n");
1669 return ret;
1670 }
1671
1672#if defined(CONFIG_DEBUG_FS)
1673 mdp_debugfs_init();
1674#endif
1675
1676 return 0;
1677
1678}
1679
/* Register mdp_driver_init() as the module initializer. */
module_init(mdp_driver_init);