blob: 994d42ba9eaa216e184e7cf8f6be653b4070b0aa [file] [log] [blame]
Pavel Machekd480ace2009-09-22 16:47:03 -07001/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
Pavel Machekd480ace2009-09-22 16:47:03 -07006 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/module.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070019#include <linux/kernel.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/sched.h>
21#include <linux/time.h>
22#include <linux/init.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070023#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <linux/spinlock.h>
25#include <linux/hrtimer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070026#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/hardware.h>
28#include <linux/io.h>
29#include <linux/debugfs.h>
30#include <linux/delay.h>
31#include <linux/mutex.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <asm/system.h>
36#include <asm/mach-types.h>
37#include <linux/semaphore.h>
38#include <linux/uaccess.h>
39#include <mach/clk.h>
40#include "mdp.h"
41#include "msm_fb.h"
42#ifdef CONFIG_FB_MSM_MDP40
43#include "mdp4.h"
44#endif
45#include "mipi_dsi.h"
Pavel Machekd480ace2009-09-22 16:47:03 -070046
uint32 mdp4_extn_disp;

/* MDP core/interface clocks; NULL when the platform does not provide one */
static struct clk *mdp_clk;
static struct clk *mdp_pclk;
static struct clk *mdp_axi_clk;
static struct clk *mdp_lut_clk;
int mdp_rev;

struct regulator *footswitch;

/* PPP completion plus top-level serialization semaphores */
struct completion mdp_ppp_comp;
struct semaphore mdp_ppp_mutex;
struct semaphore mdp_pipe_ctrl_mutex;

unsigned long mdp_timer_duration = (HZ/20); /* 50 msecond */

boolean mdp_ppp_waiting = FALSE;
uint32 mdp_tv_underflow_cnt;
uint32 mdp_lcdc_underflow_cnt;

boolean mdp_current_clk_on = FALSE;
boolean mdp_is_in_isr = FALSE;

/*
 * legacy mdp_in_processing is only for DMA2-MDDI
 * this applies to DMA2 block only
 */
uint32 mdp_in_processing = FALSE;

#ifdef CONFIG_FB_MSM_MDP40
uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
#else
uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
#endif

MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];

/* per-block power reference counts; managed by mdp_pipe_ctrl() */
atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];

spinlock_t mdp_spin_lock;
struct workqueue_struct *mdp_dma_wq;	/*mdp dma wq */
struct workqueue_struct *mdp_vsync_wq;	/*mdp vsync wq */

static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
static struct delayed_work mdp_pipe_ctrl_worker;

static boolean mdp_suspended = FALSE;
DEFINE_MUTEX(mdp_suspend_mutex);

/* DMA channel bookkeeping; exported (non-static) on MDP4 for mdp4_*.c */
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
struct mdp_dma_data dma_s_data;
struct mdp_dma_data dma_e_data;
ulong mdp4_display_intf;
#else
static struct mdp_dma_data dma2_data;
static struct mdp_dma_data dma_s_data;
#ifndef CONFIG_FB_MSM_MDP303
static struct mdp_dma_data dma_e_data;
#endif
#endif
static struct mdp_dma_data dma3_data;

extern ktime_t mdp_dma2_last_update_time;

extern uint32 mdp_dma2_update_time_in_usec;
extern int mdp_lcd_rd_cnt_offset_slow;
extern int mdp_lcd_rd_cnt_offset_fast;
extern int mdp_usec_diff_threshold;

#ifdef CONFIG_FB_MSM_LCDC
extern int first_pixel_start_x;
extern int first_pixel_start_y;
#endif

#ifdef MSM_FB_ENABLE_DBGFS
struct dentry *mdp_dir;
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
#else
#define mdp_suspend NULL
#endif

struct timeval mdp_dma2_timeval;
struct timeval mdp_ppp_timeval;

#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend early_suspend;
#endif

static u32 mdp_irq;

static uint32 mdp_prim_panel_type = NO_PANEL;
#ifndef CONFIG_FB_MSM_MDP22
DEFINE_MUTEX(mdp_lut_push_sem);
/* double-buffered LUT bank index (0 or 1) */
static int mdp_lut_i;
145static int mdp_lut_hw_update(struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700146{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700147 int i;
148 u16 *c[3];
149 u16 r, g, b;
Pavel Machekd480ace2009-09-22 16:47:03 -0700150
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700151 c[0] = cmap->green;
152 c[1] = cmap->blue;
153 c[2] = cmap->red;
Pavel Machekd480ace2009-09-22 16:47:03 -0700154
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 for (i = 0; i < cmap->len; i++) {
156 if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
157 copy_from_user(&g, cmap->green++, sizeof(g)) ||
158 copy_from_user(&b, cmap->blue++, sizeof(b)))
159 return -EFAULT;
160
161#ifdef CONFIG_FB_MSM_MDP40
162 MDP_OUTP(MDP_BASE + 0x94800 +
163#else
164 MDP_OUTP(MDP_BASE + 0x93800 +
165#endif
166 (0x400*mdp_lut_i) + cmap->start*4 + i*4,
167 ((g & 0xff) |
168 ((b & 0xff) << 8) |
169 ((r & 0xff) << 16)));
Pavel Machekd480ace2009-09-22 16:47:03 -0700170 }
171
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700172 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700173}
174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175static int mdp_lut_push;
176static int mdp_lut_push_i;
177static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700178{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179 int ret;
180
181 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
182 ret = mdp_lut_hw_update(cmap);
183 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
184
185 if (ret)
186 return ret;
187
188 mutex_lock(&mdp_lut_push_sem);
189 mdp_lut_push = 1;
190 mdp_lut_push_i = mdp_lut_i;
191 mutex_unlock(&mdp_lut_push_sem);
192
193 mdp_lut_i = (mdp_lut_i + 1)%2;
194
195 return 0;
196}
197
198static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
199{
200 int ret;
201
202 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
203 ret = mdp_lut_hw_update(cmap);
204
205 if (ret) {
206 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
207 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -0700208 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209
210 MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
211 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
212 mdp_lut_i = (mdp_lut_i + 1)%2;
213
214 return 0;
215}
216
217static void mdp_lut_enable(void)
218{
219 if (mdp_lut_push) {
220 mutex_lock(&mdp_lut_push_sem);
221 mdp_lut_push = 0;
222 MDP_OUTP(MDP_BASE + 0x90070,
223 (mdp_lut_push_i << 10) | 0x17);
224 mutex_unlock(&mdp_lut_push_sem);
225 }
226}
227
/* histogram bin-count limits per MDP hardware revision */
#define MDP_REV42_HIST_MAX_BIN 128
#define MDP_REV41_HIST_MAX_BIN 32
/* bounce buffers the ISR fills from the histogram registers; sized for the
 * largest supported revision */
static __u32 mdp_hist_r[MDP_REV42_HIST_MAX_BIN];
static __u32 mdp_hist_g[MDP_REV42_HIST_MAX_BIN];
static __u32 mdp_hist_b[MDP_REV42_HIST_MAX_BIN];

/* exported (non-static) on MDP4 so the mdp4_*.c histogram path can use them */
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_histogram mdp_hist;
struct completion mdp_hist_comp;
boolean mdp_is_hist_start = FALSE;
#else
static struct mdp_histogram mdp_hist;
static struct completion mdp_hist_comp;
static boolean mdp_is_hist_start = FALSE;
#endif
static DEFINE_MUTEX(mdp_hist_mutex);
244
/*
 * mdp_histogram_ctrl() - re-arm or quiesce histogram collection across a
 * panel on/off transition. Only acts if userspace has previously started
 * histogram collection (mdp_is_hist_start); otherwise a no-op.
 * en == TRUE: re-enable the IRQ and kick a 1-frame collection.
 * en == FALSE: just drop the histogram IRQ reference.
 * Always returns 0.
 */
int mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	boolean hist_start;
	/* snapshot the flag under the spinlock; it is set/cleared from
	 * mdp_start/stop_histogram under the same lock */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	hist_start = mdp_is_hist_start;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (hist_start == TRUE) {
		if (en == TRUE) {
			mdp_enable_irq(MDP_HISTOGRAM_TERM);
			mdp_hist.frame_cnt = 1;
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			/* program frame count and start collection; register
			 * offsets differ between MDP4 and earlier cores */
#ifdef CONFIG_FB_MSM_MDP40
			MDP_OUTP(MDP_BASE + 0x95010, 1);
			MDP_OUTP(MDP_BASE + 0x9501c, INTR_HIST_DONE);
			MDP_OUTP(MDP_BASE + 0x95004, 1);
			MDP_OUTP(MDP_BASE + 0x95000, 1);
#else
			MDP_OUTP(MDP_BASE + 0x94004, 1);
			MDP_OUTP(MDP_BASE + 0x94000, 1);
#endif
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
				FALSE);
		} else
			mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}
	return 0;
}
274
/*
 * mdp_start_histogram() - begin histogram collection.
 * Takes the histogram IRQ reference and kicks a 1-frame collection.
 * Returns 0 on success, -EPERM if collection is already running.
 */
int mdp_start_histogram(struct fb_info *info)
{
	unsigned long flag;

	int ret = 0;
	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_start == TRUE) {
		printk(KERN_ERR "%s histogram already started\n", __func__);
		ret = -EPERM;
		goto mdp_hist_start_err;
	}

	/* flag is also read from the ISR path, hence the spinlock */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_is_hist_start = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_HISTOGRAM_TERM);
	mdp_hist.frame_cnt = 1;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/* program frame count and start bit; offsets differ per core rev */
#ifdef CONFIG_FB_MSM_MDP40
	MDP_OUTP(MDP_BASE + 0x95004, 1);
	MDP_OUTP(MDP_BASE + 0x95000, 1);
#else
	MDP_OUTP(MDP_BASE + 0x94004, 1);
	MDP_OUTP(MDP_BASE + 0x94000, 1);
#endif
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

mdp_hist_start_err:
	mutex_unlock(&mdp_hist_mutex);
	return ret;

}
307int mdp_stop_histogram(struct fb_info *info)
308{
309 unsigned long flag;
310 int ret = 0;
311 mutex_lock(&mdp_hist_mutex);
312 if (!mdp_is_hist_start) {
313 printk(KERN_ERR "%s histogram already stopped\n", __func__);
314 ret = -EPERM;
315 goto mdp_hist_stop_err;
316 }
317 spin_lock_irqsave(&mdp_spin_lock, flag);
318 mdp_is_hist_start = FALSE;
319 spin_unlock_irqrestore(&mdp_spin_lock, flag);
320 /* disable the irq for histogram since we handled it
321 when the control reaches here */
322 mdp_disable_irq(MDP_HISTOGRAM_TERM);
323
324mdp_hist_stop_err:
325 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700326 return ret;
327}
328
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700329static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
Pavel Machekd480ace2009-09-22 16:47:03 -0700330{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331 int ret = 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700332
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700333 if (!hist->frame_cnt || (hist->bin_cnt == 0))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334 return -EINVAL;
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700335
336 if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
337 || (mdp_rev == MDP_REV_42 &&
338 hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
339 return -EINVAL;
340
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700341 mutex_lock(&mdp_hist_mutex);
342 if (!mdp_is_hist_start) {
343 printk(KERN_ERR "%s histogram not started\n", __func__);
344 mutex_unlock(&mdp_hist_mutex);
345 return -EPERM;
346 }
347 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349 INIT_COMPLETION(mdp_hist_comp);
Pavel Machekd480ace2009-09-22 16:47:03 -0700350
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351 mdp_hist.bin_cnt = hist->bin_cnt;
352 mdp_hist.frame_cnt = hist->frame_cnt;
353 mdp_hist.r = (hist->r) ? mdp_hist_r : 0;
354 mdp_hist.g = (hist->g) ? mdp_hist_g : 0;
355 mdp_hist.b = (hist->b) ? mdp_hist_b : 0;
356
357 wait_for_completion_killable(&mdp_hist_comp);
358
359 if (hist->r) {
360 ret = copy_to_user(hist->r, mdp_hist.r, hist->bin_cnt*4);
361 if (ret)
362 goto hist_err;
363 }
364 if (hist->g) {
365 ret = copy_to_user(hist->g, mdp_hist.g, hist->bin_cnt*4);
366 if (ret)
367 goto hist_err;
368 }
369 if (hist->b) {
370 ret = copy_to_user(hist->b, mdp_hist.b, hist->bin_cnt*4);
371 if (ret)
372 goto hist_err;
373 }
374 return 0;
375
376hist_err:
377 printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
378 return ret;
379}
380#endif
381
382/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
383
384int mdp_ppp_pipe_wait(void)
385{
386 int ret = 1;
387
388 /* wait 5 seconds for the operation to complete before declaring
389 the MDP hung */
390
391 if (mdp_ppp_waiting == TRUE) {
392 ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
393 5 * HZ);
394
395 if (!ret)
396 printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
397 __func__);
Pavel Machekd480ace2009-09-22 16:47:03 -0700398 }
399
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700400 return ret;
401}
Pavel Machekd480ace2009-09-22 16:47:03 -0700402
/* shared-line IRQ refcounting: mdp_irq_mask tracks which logical terms
 * hold the interrupt; the line itself is enabled while the mask != 0 */
static DEFINE_SPINLOCK(mdp_lock);
static int mdp_irq_mask;
static int mdp_irq_enabled;
Pavel Machekd480ace2009-09-22 16:47:03 -0700406
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700407/*
408 * mdp_enable_irq: can not be called from isr
409 */
410void mdp_enable_irq(uint32 term)
411{
412 unsigned long irq_flags;
413
414 spin_lock_irqsave(&mdp_lock, irq_flags);
415 if (mdp_irq_mask & term) {
416 printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
417 __func__, term, mdp_irq_mask, mdp_irq_enabled);
418 } else {
419 mdp_irq_mask |= term;
420 if (mdp_irq_mask && !mdp_irq_enabled) {
421 mdp_irq_enabled = 1;
422 enable_irq(mdp_irq);
423 }
424 }
Pavel Machekd480ace2009-09-22 16:47:03 -0700425 spin_unlock_irqrestore(&mdp_lock, irq_flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700426}
427
428/*
429 * mdp_disable_irq: can not be called from isr
430 */
431void mdp_disable_irq(uint32 term)
432{
433 unsigned long irq_flags;
434
435 spin_lock_irqsave(&mdp_lock, irq_flags);
436 if (!(mdp_irq_mask & term)) {
437 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
438 __func__, term, mdp_irq_mask, mdp_irq_enabled);
439 } else {
440 mdp_irq_mask &= ~term;
441 if (!mdp_irq_mask && mdp_irq_enabled) {
442 mdp_irq_enabled = 0;
443 disable_irq(mdp_irq);
444 }
445 }
446 spin_unlock_irqrestore(&mdp_lock, irq_flags);
447}
448
/*
 * mdp_disable_irq_nosync: same as mdp_disable_irq but safe for interrupt
 * context — uses disable_irq_nosync (does not wait for running handlers)
 * and plain spin_lock (NOTE(review): presumably only invoked with
 * interrupts already disabled, e.g. from the ISR — confirm at call sites).
 */
void mdp_disable_irq_nosync(uint32 term)
{
	spin_lock(&mdp_lock);
	if (!(mdp_irq_mask & term)) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask &= ~term;
		/* the last active term turns the shared line off */
		if (!mdp_irq_mask && mdp_irq_enabled) {
			mdp_irq_enabled = 0;
			disable_irq_nosync(mdp_irq);
		}
	}
	spin_unlock(&mdp_lock);
}
464
/*
 * mdp_pipe_kickoff() - start the hardware engine selected by @term.
 *
 * PPP kickoffs are synchronous: this blocks until the ISR completes
 * mdp_ppp_comp (or the wait is killed). DMA/overlay kickoffs just power
 * the block and poke its start register; completion is handled in the ISR.
 * Register offsets differ per MDP generation, hence the nested ifdefs.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		/* PPP start; the ISR clears mdp_ppp_waiting and completes */
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* push any pending LUT bank swap before starting the frame */
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		/* MDP303 command-mode panels need an explicit DSI trigger */
		mipi_dsi_cmd_mdp_sw_trigger();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
/* last core-clock rate saved across a clock gate, and the registered
 * panel platform devices whose clk_func hooks follow MDP power state */
static int mdp_clk_rate;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
555
/* delayed-work body: performs the deferred master power-off requested by
 * mdp_pipe_ctrl() when all blocks went idle */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp_pipe_ctrl() - reference-counted power control for MDP blocks.
 *
 * Each block keeps a counter in mdp_block_power_cnt[]; ON increments,
 * OFF decrements. When every counter reaches zero the MDP clocks are
 * gated (immediately from user context if the master block or suspend
 * requested it, otherwise via a delayed work item); when any block comes
 * back on while the clocks are off, they are re-enabled.
 *
 * @isr: TRUE when called from interrupt context — then only OFF requests
 * are legal and clock gating is always deferred to the workqueue.
 */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			 * Master has to serve a request to power off MDP always
			 * It also has a timer to power off. So, in case of
			 * timer expires first and DMA2 finishes later,
			 * master has to power off two times
			 * There shouldn't be multiple power-off request for
			 * other blocks
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run. queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					/* remember the rate so it can be
					 * restored when clocks come back */
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					/* early MDP4 silicon: cap the rate
					 * while gated */
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_axi_clk != NULL)
					clk_disable(mdp_axi_clk);
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				/* restore the rate saved at gate time */
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_axi_clk != NULL)
				clk_enable(mdp_axi_clk);
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
725
726#ifndef CONFIG_FB_MSM_MDP40
727irqreturn_t mdp_isr(int irq, void *ptr)
728{
729 uint32 mdp_interrupt = 0;
730 struct mdp_dma_data *dma;
731
732 mdp_is_in_isr = TRUE;
733 do {
734 mdp_interrupt = inp32(MDP_INTR_STATUS);
735 outp32(MDP_INTR_CLEAR, mdp_interrupt);
736
737 mdp_interrupt &= mdp_intr_mask;
738
739 if (mdp_interrupt & TV_ENC_UNDERRUN) {
740 mdp_interrupt &= ~(TV_ENC_UNDERRUN);
741 mdp_tv_underflow_cnt++;
742 }
743
744 if (!mdp_interrupt)
745 break;
746
747 /* DMA3 TV-Out Start */
748 if (mdp_interrupt & TV_OUT_DMA3_START) {
749 /* let's disable TV out interrupt */
750 mdp_intr_mask &= ~TV_OUT_DMA3_START;
751 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
752
753 dma = &dma3_data;
754 if (dma->waiting) {
755 dma->waiting = FALSE;
756 complete(&dma->comp);
757 }
758 }
759#ifndef CONFIG_FB_MSM_MDP22
760 if (mdp_interrupt & MDP_HIST_DONE) {
761 outp32(MDP_BASE + 0x94018, 0x3);
762 outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
763 if (mdp_hist.r)
764 memcpy(mdp_hist.r, MDP_BASE + 0x94100,
765 mdp_hist.bin_cnt*4);
766 if (mdp_hist.g)
767 memcpy(mdp_hist.g, MDP_BASE + 0x94200,
768 mdp_hist.bin_cnt*4);
769 if (mdp_hist.b)
770 memcpy(mdp_hist.b, MDP_BASE + 0x94300,
771 mdp_hist.bin_cnt*4);
772 complete(&mdp_hist_comp);
773 if (mdp_is_hist_start == TRUE) {
774 MDP_OUTP(MDP_BASE + 0x94004,
775 mdp_hist.frame_cnt);
776 MDP_OUTP(MDP_BASE + 0x94000, 1);
777 }
778 }
779
780 /* LCDC UnderFlow */
781 if (mdp_interrupt & LCDC_UNDERFLOW) {
782 mdp_lcdc_underflow_cnt++;
783 /*when underflow happens HW resets all the histogram
784 registers that were set before so restore them back
785 to normal.*/
786 MDP_OUTP(MDP_BASE + 0x94010, 1);
787 MDP_OUTP(MDP_BASE + 0x9401c, 2);
788 if (mdp_is_hist_start == TRUE) {
789 MDP_OUTP(MDP_BASE + 0x94004,
790 mdp_hist.frame_cnt);
791 MDP_OUTP(MDP_BASE + 0x94000, 1);
792 }
793 }
794 /* LCDC Frame Start */
795 if (mdp_interrupt & LCDC_FRAME_START) {
796 /* let's disable LCDC interrupt */
797 mdp_intr_mask &= ~LCDC_FRAME_START;
798 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
799
800 dma = &dma2_data;
801 if (dma->waiting) {
802 dma->waiting = FALSE;
803 complete(&dma->comp);
804 }
805 }
806
807 /* DMA2 LCD-Out Complete */
808 if (mdp_interrupt & MDP_DMA_S_DONE) {
809 dma = &dma_s_data;
810 dma->busy = FALSE;
811 mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
812 TRUE);
813 complete(&dma->comp);
814 }
815 /* DMA_E LCD-Out Complete */
816 if (mdp_interrupt & MDP_DMA_E_DONE) {
817 dma = &dma_s_data;
818 dma->busy = FALSE;
819 mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
820 TRUE);
821 complete(&dma->comp);
822 }
823
824#endif
825
826 /* DMA2 LCD-Out Complete */
827 if (mdp_interrupt & MDP_DMA_P_DONE) {
828 struct timeval now;
829
830 mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
831 mdp_dma2_last_update_time);
832 if (mdp_debug[MDP_DMA2_BLOCK]) {
833 jiffies_to_timeval(jiffies, &now);
834 mdp_dma2_timeval.tv_usec =
835 now.tv_usec - mdp_dma2_timeval.tv_usec;
836 }
837#ifndef CONFIG_FB_MSM_MDP303
838 dma = &dma2_data;
839 dma->busy = FALSE;
840 mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
841 TRUE);
842 complete(&dma->comp);
843#else
844 if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
845 dma = &dma2_data;
846 dma->busy = FALSE;
847 mdp_pipe_ctrl(MDP_DMA2_BLOCK,
848 MDP_BLOCK_POWER_OFF, TRUE);
849 complete(&dma->comp);
850 }
851#endif
852 }
853 /* PPP Complete */
854 if (mdp_interrupt & MDP_PPP_DONE) {
855#ifdef CONFIG_FB_MSM_MDP31
856 MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
857#endif
858 mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
859 if (mdp_ppp_waiting) {
860 mdp_ppp_waiting = FALSE;
861 complete(&mdp_ppp_comp);
862 }
863 }
864 } while (1);
865
866 mdp_is_in_isr = FALSE;
867
Pavel Machekd480ace2009-09-22 16:47:03 -0700868 return IRQ_HANDLED;
869}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700870#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700871
/*
 * mdp_drv_init() - one-time driver state initialization: debug flags,
 * locks, workqueues, per-DMA-channel bookkeeping, power refcounts, and
 * (optionally) the debugfs tree.
 */
static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	sema_init(&mdp_ppp_mutex, 1);
	sema_init(&mdp_pipe_ctrl_mutex, 1);

	dma2_data.busy = FALSE;
	dma2_data.dmap_busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_completion(&dma2_data.dmap_comp);
	sema_init(&dma2_data.mutex, 1);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	sema_init(&dma3_data.mutex, 1);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	sema_init(&dma_s_data.mutex, 1);

	/* dma_e_data exists only on non-MDP303 builds */
#ifndef CONFIG_FB_MSM_MDP303
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	mutex_init(&dma_e_data.ov_mutex);
#endif

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		atomic_set(&mdp_block_power_cnt[i], 0);
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
#endif
			}
		}
	}
#endif
}
965
/* forward declarations for the platform_driver table below */
static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);
968
/* pm_runtime hook: no device work needed, just trace the transition */
static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
974
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700975static int mdp_runtime_resume(struct device *dev)
Pavel Machekd480ace2009-09-22 16:47:03 -0700976{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700977 dev_dbg(dev, "pm_runtime: resuming...\n");
Pavel Machekd480ace2009-09-22 16:47:03 -0700978 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700979}
980
/* runtime-PM callbacks wired into mdp_driver.driver.pm below */
static struct dev_pm_ops mdp_dev_pm_ops = {
	.runtime_suspend = mdp_runtime_suspend,
	.runtime_resume = mdp_runtime_resume,
};
985
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986
/* platform driver binding for the "mdp" device; legacy suspend/resume is
 * used only when early-suspend is not handling it */
static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.pm = &mdp_dev_pm_ops,
	},
};
1004
/*
 * Panel power-off path, installed as pdata->off for the msm_fb core.
 * Stops histogram collection first, then holds the MDP command block
 * powered around the downstream panel off call so register access in
 * that path is safe.  Returns the downstream panel driver's result.
 */
static int mdp_off(struct platform_device *pdev)
{
	int ret = 0;
	mdp_histogram_ctrl(FALSE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_off(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}
1016
/*
 * Panel power-on path, installed as pdata->on for the msm_fb core.
 * On MDP4 parts: if the core lost state while powered down
 * (is_mdp4_hw_reset()), re-run HW init and restore the cached
 * display-interface mux (mdp4_display_intf, saved in mdp_probe) to
 * register 0x0038.  Then re-enable histogram collection and call the
 * downstream panel on with the command block powered.
 */
static int mdp_on(struct platform_device *pdev)
{
	int ret = 0;
#ifdef CONFIG_FB_MSM_MDP40
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (is_mdp4_hw_reset()) {
		mdp4_hw_init();
		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
	mdp_histogram_ctrl(TRUE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_on(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return ret;
}
1035
/* Set once the id==0 probe has claimed the MDP registers, IRQ and
 * clocks; later per-panel probes bail out with -EPERM until then. */
static int mdp_resource_initialized;
/* Platform data taken from the MDP core device (pdev->id == 0). */
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;

/*
 * mdp_hw_revision:
 * 0 == V1
 * 1 == V2
 * 2 == V2.1
 *
 */
1048void mdp_hw_version(void)
1049{
1050 char *cp;
1051 uint32 *hp;
1052
1053 if (mdp_pdata == NULL)
1054 return;
1055
1056 mdp_hw_revision = MDP4_REVISION_NONE;
1057 if (mdp_pdata->hw_revision_addr == 0)
1058 return;
1059
1060 /* tlmmgpio2 shadow */
1061 cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);
1062
1063 if (cp == NULL)
1064 return;
1065
1066 hp = (uint32 *)cp; /* HW_REVISION_NUMBER */
1067 mdp_hw_revision = *hp;
1068 iounmap(cp);
1069
1070 mdp_hw_revision >>= 28; /* bit 31:28 */
1071 mdp_hw_revision &= 0x0f;
1072
1073 MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
1074 __func__, mdp_hw_revision);
1075}
1076
#ifdef CONFIG_FB_MSM_MDP40
/*
 * Raise the MDP core-clock floor to min_clk_rate, then patch any
 * platform-table entries that fall below the rate the clock framework
 * actually granted, so later table lookups never select a rate under
 * the current floor.  No-op when the clock or table is absent, or when
 * clk_set_min_rate() fails (an error is logged).
 */
static void configure_mdp_core_clk_table(uint32 min_clk_rate)
{
	uint8 i;
	uint32 granted;

	if (!mdp_clk || !mdp_pdata || !mdp_pdata->mdp_core_clk_table)
		return;

	if (clk_set_min_rate(mdp_clk, min_clk_rate) < 0) {
		printk(KERN_ERR "%s: clk_set_min_rate failed\n",
			__func__);
		return;
	}

	granted = clk_get_rate(mdp_clk);
	for (i = 0; i < mdp_pdata->num_mdp_clk; i++) {
		if (mdp_pdata->mdp_core_clk_table[i] < granted)
			mdp_pdata->mdp_core_clk_table[i] = granted;
	}
}
#endif
1104
#ifdef CONFIG_MSM_BUS_SCALING
/* Client handle returned by msm_bus_scale_register_client() in mdp_probe. */
static uint32_t mdp_bus_scale_handle;

/*
 * Ask the MSM bus-scaling framework for bandwidth use-case 'index'.
 * Returns -EINVAL for a missing platform table, an out-of-range index,
 * or an unregistered client handle; otherwise the framework's result.
 */
int mdp_bus_scale_update_request(uint32_t index)
{
	/*
	 * Bug fix: this used to read "!mdp_pdata && (...)", which both
	 * defeated the validation (all sub-conditions had to hold at once)
	 * and dereferenced mdp_pdata exactly when it was NULL.  Each
	 * condition must individually reject the request.
	 */
	if (!mdp_pdata || !mdp_pdata->mdp_bus_scale_table
		|| index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1)) {
		printk(KERN_ERR "%s invalid table or index\n", __func__);
		return -EINVAL;
	}
	if (mdp_bus_scale_handle < 1) {
		printk(KERN_ERR "%s invalid bus handle\n", __func__);
		return -EINVAL;
	}
	return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
							index);
}
#endif
1122DEFINE_MUTEX(mdp_clk_lock);
1123int mdp_set_core_clk(uint16 perf_level)
1124{
1125 int ret = -EINVAL;
1126 if (mdp_clk && mdp_pdata
1127 && mdp_pdata->mdp_core_clk_table) {
1128 if (perf_level > mdp_pdata->num_mdp_clk)
1129 printk(KERN_ERR "%s invalid perf level\n", __func__);
1130 else {
1131 mutex_lock(&mdp_clk_lock);
1132 if (mdp4_extn_disp)
1133 perf_level = 1;
1134 ret = clk_set_rate(mdp_clk,
1135 mdp_pdata->
1136 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1137 - perf_level]);
1138 mutex_unlock(&mdp_clk_lock);
1139 if (ret) {
1140 printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
1141 __func__);
1142 }
1143 }
1144 }
1145 return ret;
1146}
1147
1148unsigned long mdp_get_core_clk(void)
1149{
1150 unsigned long clk_rate = 0;
1151 if (mdp_clk) {
1152 mutex_lock(&mdp_clk_lock);
1153 clk_rate = clk_get_rate(mdp_clk);
1154 mutex_unlock(&mdp_clk_lock);
1155 }
1156
1157 return clk_rate;
1158}
1159
1160unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
1161{
1162 unsigned long clk_rate = 0;
1163
1164 if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
1165 if (perf_level > mdp_pdata->num_mdp_clk) {
1166 printk(KERN_ERR "%s invalid perf level\n", __func__);
1167 clk_rate = mdp_get_core_clk();
1168 } else {
1169 if (mdp4_extn_disp)
1170 perf_level = 1;
1171 clk_rate = mdp_pdata->
1172 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1173 - perf_level];
1174 }
1175 } else
1176 clk_rate = mdp_get_core_clk();
1177
1178 return clk_rate;
1179}
1180
1181static int mdp_irq_clk_setup(void)
1182{
1183 int ret;
1184
1185#ifdef CONFIG_FB_MSM_MDP40
1186 ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
1187#else
1188 ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
1189#endif
1190 if (ret) {
1191 printk(KERN_ERR "mdp request_irq() failed!\n");
1192 return ret;
1193 }
1194 disable_irq(mdp_irq);
1195
1196 footswitch = regulator_get(NULL, "fs_mdp");
1197 if (IS_ERR(footswitch))
1198 footswitch = NULL;
1199 else
1200 regulator_enable(footswitch);
1201
1202 mdp_clk = clk_get(NULL, "mdp_clk");
1203 if (IS_ERR(mdp_clk)) {
1204 ret = PTR_ERR(mdp_clk);
1205 printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
1206 free_irq(mdp_irq, 0);
1207 return ret;
1208 }
1209
1210 mdp_pclk = clk_get(NULL, "mdp_pclk");
1211 if (IS_ERR(mdp_pclk))
1212 mdp_pclk = NULL;
1213
1214 if (mdp_rev == MDP_REV_42) {
1215 mdp_axi_clk = clk_get(NULL, "mdp_axi_clk");
1216 if (IS_ERR(mdp_axi_clk)) {
1217 ret = PTR_ERR(mdp_axi_clk);
1218 clk_put(mdp_clk);
1219 pr_err("can't get mdp_axi_clk error:%d!\n", ret);
1220 return ret;
1221 }
1222
1223 mdp_lut_clk = clk_get(NULL, "lut_mdp");
1224 if (IS_ERR(mdp_lut_clk)) {
1225 ret = PTR_ERR(mdp_lut_clk);
1226 pr_err("can't get mdp_clk error:%d!\n", ret);
1227 clk_put(mdp_clk);
1228 clk_put(mdp_axi_clk);
1229 free_irq(mdp_irq, 0);
1230 return ret;
1231 }
1232 } else {
1233 mdp_axi_clk = NULL;
1234 mdp_lut_clk = NULL;
1235 }
1236
1237#ifdef CONFIG_FB_MSM_MDP40
1238 /*
1239 * mdp_clk should greater than mdp_pclk always
1240 */
1241 if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
1242 mutex_lock(&mdp_clk_lock);
1243 clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
1244 if (mdp_lut_clk != NULL)
1245 clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
1246 mutex_unlock(&mdp_clk_lock);
1247 }
1248 MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
1249#endif
1250 return 0;
1251}
1252
/*
 * Platform probe with two roles:
 *  1. pdev->id == 0 with resources: the MDP core device.  Map the
 *     register block, grab the IRQ/clocks/regulator, run HW init and
 *     mark resources initialized.
 *  2. Any later pdev: a panel device.  Create a child "msm_fb" device,
 *     chain the panel's platform data behind mdp_on/mdp_off, and set up
 *     the DMA/vsync/cursor/LUT hooks appropriate to the panel type.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size ;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	/* role 1: the MDP core device itself */
	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size = resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		/* NOTE(review): IRQ lookup failure is reported as -ENOMEM
		 * rather than the lookup error code */
		if (mdp_irq < 0) {
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		/* assumes the core device always has platform data here,
		 * unlike the NULL-checked uses elsewhere -- TODO confirm */
		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
		mdp4_fetch_cfg(clk_get_rate(mdp_clk));
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif

		mdp_resource_initialized = 1;
		return 0;
	}

	/* role 2: a panel device -- requires role 1 to have completed */
	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;

	/* add panel data */
	if (platform_device_add_data
		(msm_fb_dev, pdev->dev.platform_data,
			sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain: msm_fb -> mdp (this layer) -> panel driver */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	/* per-panel-type wiring of DMA, vsync, cursor and LUT hooks */
	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		/* MDP303: no overlay engine; only DISPLAY_1 is supported */
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		/* core clock floor: 1.5x the DSI pixel clock */
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		/* core clock floor: 1.15x the panel pixel clock */
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
						* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		/* mask DMA_P done interrupt: LCDC refreshes autonomously */
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	/* cache the interface mux so mdp_on() can restore it after reset */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			/* NOTE(review): returns here without
			 * platform_device_put(msm_fb_dev) -- possible leak */
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif
	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

 mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}
1583
#ifdef CONFIG_PM
/*
 * Common power-down sequence shared by the legacy suspend path and the
 * early-suspend path: stop the pipe-ctrl worker, drain outstanding PPP
 * (blit) work, gate the blocks and mark the driver suspended.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* in case the worker couldn't be cancelled, wait for it to finish */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion (busy-wait on the power refcount) */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif
1605
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
/*
 * Legacy platform suspend callback.  Only the MDP core device (id 0)
 * triggers the power-down sequence; panel devices are ignored.
 * Returns -EBUSY if a clock is still running after the attempt.
 */
static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (pdev->id != 0)
		return 0;

	mdp_suspend_sub();

	if (mdp_current_clk_on) {
		printk(KERN_WARNING"MDP suspend failed\n");
		return -EBUSY;
	}

	return 0;
}
#endif
1620
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Early-suspend hook: run the same power-down sequence as mdp_suspend(). */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
}

/* Early-resume hook: just clear the suspended flag under the mutex;
 * hardware is brought back up through the pdata->on path. */
static void mdp_early_resume(struct early_suspend *h)
{
	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif
1634
1635static int mdp_remove(struct platform_device *pdev)
1636{
1637 if (footswitch != NULL)
1638 regulator_put(footswitch);
1639 iounmap(msm_mdp_base);
1640 pm_runtime_disable(&pdev->dev);
1641#ifdef CONFIG_MSM_BUS_SCALING
1642 if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
1643 mdp_bus_scale_handle > 0)
1644 msm_bus_scale_unregister_client(mdp_bus_scale_handle);
1645#endif
1646 return 0;
1647}
1648
/*
 * Register the MDP platform driver and, when early-suspend support is
 * built in, the early-suspend handlers that replace the legacy
 * suspend/resume callbacks.
 */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	/* ordered relative to the framebuffer-disable early-suspend level */
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
1660
/*
 * Module entry point: initialize driver-wide MDP state, register the
 * platform driver and, when debugfs is available, create the MDP
 * debugfs nodes.
 */
static int __init mdp_driver_init(void)
{
	int ret;

	mdp_drv_init();

	ret = mdp_register_driver();
	if (ret) {
		printk(KERN_ERR "mdp_register_driver() failed!\n");
		return ret;
	}

#if defined(CONFIG_DEBUG_FS)
	mdp_debugfs_init();
#endif

	return 0;

}

module_init(mdp_driver_init);