/* drivers/video/msm_fb/mdp.c
 *
 * MSM MDP Interface (used by framebuffer core)
 *
 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
 * Copyright (C) 2007 Google Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <mach/clk.h>
#include "mdp.h"
#include "msm_fb.h"
#ifdef CONFIG_FB_MSM_MDP40
#include "mdp4.h"
#endif
#include "mipi_dsi.h"

uint32 mdp4_extn_disp;

static struct clk *mdp_clk;
static struct clk *mdp_pclk;
static struct clk *mdp_lut_clk;
int mdp_rev;

static struct regulator *footswitch;

struct completion mdp_ppp_comp;
struct semaphore mdp_ppp_mutex;
struct semaphore mdp_pipe_ctrl_mutex;

unsigned long mdp_timer_duration = (HZ/20);	/* 50 msec */

boolean mdp_ppp_waiting = FALSE;
uint32 mdp_tv_underflow_cnt;
uint32 mdp_lcdc_underflow_cnt;

boolean mdp_current_clk_on = FALSE;
boolean mdp_is_in_isr = FALSE;

/*
 * legacy mdp_in_processing is only for DMA2-MDDI
 * this applies to DMA2 block only
 */
uint32 mdp_in_processing = FALSE;

#ifdef CONFIG_FB_MSM_MDP40
uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
#else
uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
#endif

MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];

atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];

spinlock_t mdp_spin_lock;
struct workqueue_struct *mdp_dma_wq;	/* mdp dma wq */
struct workqueue_struct *mdp_vsync_wq;	/* mdp vsync wq */

static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp pipe ctrl wq */
static struct delayed_work mdp_pipe_ctrl_worker;

static boolean mdp_suspended = FALSE;
DEFINE_MUTEX(mdp_suspend_mutex);

#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
struct mdp_dma_data dma_s_data;
struct mdp_dma_data dma_e_data;
ulong mdp4_display_intf;
#else
static struct mdp_dma_data dma2_data;
static struct mdp_dma_data dma_s_data;
#ifndef CONFIG_FB_MSM_MDP303
static struct mdp_dma_data dma_e_data;
#endif
#endif
static struct mdp_dma_data dma3_data;

extern ktime_t mdp_dma2_last_update_time;

extern uint32 mdp_dma2_update_time_in_usec;
extern int mdp_lcd_rd_cnt_offset_slow;
extern int mdp_lcd_rd_cnt_offset_fast;
extern int mdp_usec_diff_threshold;

#ifdef CONFIG_FB_MSM_LCDC
extern int first_pixel_start_x;
extern int first_pixel_start_y;
#endif

#ifdef MSM_FB_ENABLE_DBGFS
struct dentry *mdp_dir;
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
#else
#define mdp_suspend NULL
#endif

struct timeval mdp_dma2_timeval;
struct timeval mdp_ppp_timeval;

#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend early_suspend;
#endif

static u32 mdp_irq;

static uint32 mdp_prim_panel_type = NO_PANEL;
#ifndef CONFIG_FB_MSM_MDP22
DEFINE_MUTEX(mdp_lut_push_sem);
static int mdp_lut_i;
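/*
 * The MDP LUT is double-buffered: mdp_lut_i selects which of the two
 * hardware banks (0x400 bytes apart) the next update is written into.
 * Non-LCDC updates are latched later, at DMA kickoff, via mdp_lut_enable();
 * LCDC updates are latched immediately.  The index toggles after every
 * successful update.
 */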
static int mdp_lut_hw_update(struct fb_cmap *cmap)
{
	int i;
	u16 *c[3];
	u16 r, g, b;

	c[0] = cmap->green;
	c[1] = cmap->blue;
	c[2] = cmap->red;

	for (i = 0; i < cmap->len; i++) {
		if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
		    copy_from_user(&g, cmap->green++, sizeof(g)) ||
		    copy_from_user(&b, cmap->blue++, sizeof(b)))
			return -EFAULT;

#ifdef CONFIG_FB_MSM_MDP40
		MDP_OUTP(MDP_BASE + 0x94800 +
#else
		MDP_OUTP(MDP_BASE + 0x93800 +
#endif
			(0x400*mdp_lut_i) + cmap->start*4 + i*4,
				((g & 0xff) |
				 ((b & 0xff) << 8) |
				 ((r & 0xff) << 16)));
	}

	return 0;
}

static int mdp_lut_push;
static int mdp_lut_push_i;
static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
{
	int ret;

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = mdp_lut_hw_update(cmap);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	if (ret)
		return ret;

	mutex_lock(&mdp_lut_push_sem);
	mdp_lut_push = 1;
	mdp_lut_push_i = mdp_lut_i;
	mutex_unlock(&mdp_lut_push_sem);

	mdp_lut_i = (mdp_lut_i + 1)%2;

	return 0;
}

static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
{
	int ret;

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = mdp_lut_hw_update(cmap);

	if (ret) {
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
		return ret;
	}

	MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mdp_lut_i = (mdp_lut_i + 1)%2;

	return 0;
}

static void mdp_lut_enable(void)
{
	if (mdp_lut_push) {
		mutex_lock(&mdp_lut_push_sem);
		mdp_lut_push = 0;
		MDP_OUTP(MDP_BASE + 0x90070,
				(mdp_lut_push_i << 10) | 0x17);
		mutex_unlock(&mdp_lut_push_sem);
	}
}

#define MDP_REV42_HIST_MAX_BIN 128
#define MDP_REV41_HIST_MAX_BIN 32

#ifdef CONFIG_FB_MSM_MDP40
unsigned int mdp_hist_frame_cnt;
struct completion mdp_hist_comp;
boolean mdp_is_hist_start = FALSE;
#else
static unsigned int mdp_hist_frame_cnt;
static struct completion mdp_hist_comp;
static boolean mdp_is_hist_start = FALSE;
#endif
static DEFINE_MUTEX(mdp_hist_mutex);

int mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	unsigned long hist_base;
	boolean hist_start;

	if (mdp_rev >= MDP_REV_40)
		hist_base = 0x95000;
	else
		hist_base = 0x94000;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	hist_start = mdp_is_hist_start;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (hist_start == TRUE) {
		if (en == TRUE) {
			mdp_enable_irq(MDP_HISTOGRAM_TERM);
			mdp_hist_frame_cnt = 1;
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			if (mdp_rev >= MDP_REV_40) {
				MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
				MDP_OUTP(MDP_BASE + hist_base + 0x1c,
						INTR_HIST_DONE);
			}
			MDP_OUTP(MDP_BASE + hist_base + 0x4,
					mdp_hist_frame_cnt);
			MDP_OUTP(MDP_BASE + hist_base, 1);
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
					FALSE);
		} else
			mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}
	return 0;
}

int mdp_start_histogram(struct fb_info *info)
{
	unsigned long flag;

	int ret = 0;
	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_start == TRUE) {
		printk(KERN_ERR "%s histogram already started\n", __func__);
		ret = -EPERM;
		goto mdp_hist_start_err;
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_is_hist_start = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_HISTOGRAM_TERM);
	mdp_hist_frame_cnt = 1;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#ifdef CONFIG_FB_MSM_MDP40
	MDP_OUTP(MDP_BASE + 0x95004, 1);
	MDP_OUTP(MDP_BASE + 0x95000, 1);
#else
	MDP_OUTP(MDP_BASE + 0x94004, 1);
	MDP_OUTP(MDP_BASE + 0x94000, 1);
#endif
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

mdp_hist_start_err:
	mutex_unlock(&mdp_hist_mutex);
	return ret;

}
int mdp_stop_histogram(struct fb_info *info)
{
	unsigned long flag;
	int ret = 0;
	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram already stopped\n", __func__);
		ret = -EPERM;
		goto mdp_hist_stop_err;
	}
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_is_hist_start = FALSE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	/* disable the irq for histogram since we handled it
	   when the control reaches here */
	mdp_disable_irq(MDP_HISTOGRAM_TERM);

mdp_hist_stop_err:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}

static int mdp_copy_hist_data(struct mdp_histogram *hist)
{
	char *mdp_hist_base;
	uint32 r_data_offset = 0x100, g_data_offset = 0x200;
	uint32 b_data_offset = 0x300;
	int ret = 0;

	mutex_lock(&mdp_hist_mutex);
	if (mdp_rev >= MDP_REV_42) {
		mdp_hist_base = MDP_BASE + 0x95000;
		r_data_offset = 0x400;
		g_data_offset = 0x800;
		b_data_offset = 0xc00;
	} else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_41) {
		mdp_hist_base = MDP_BASE + 0x95000;
	} else if (mdp_rev >= MDP_REV_30 && mdp_rev <= MDP_REV_31) {
		mdp_hist_base = MDP_BASE + 0x94000;
	} else {
		pr_err("%s(): Unsupported MDP rev %u\n", __func__, mdp_rev);
		mutex_unlock(&mdp_hist_mutex);
		return -EPERM;
	}

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (hist->r) {
		ret = copy_to_user(hist->r, mdp_hist_base + r_data_offset,
			hist->bin_cnt * 4);
		if (ret)
			goto hist_err;
	}
	if (hist->g) {
		ret = copy_to_user(hist->g, mdp_hist_base + g_data_offset,
			hist->bin_cnt * 4);
		if (ret)
			goto hist_err;
	}
	if (hist->b) {
		ret = copy_to_user(hist->b, mdp_hist_base + b_data_offset,
			hist->bin_cnt * 4);
		if (ret)
			goto hist_err;
	}

	if (mdp_is_hist_start == TRUE) {
		MDP_OUTP(mdp_hist_base + 0x004,
				mdp_hist_frame_cnt);
		MDP_OUTP(mdp_hist_base, 1);
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mutex_unlock(&mdp_hist_mutex);
	return 0;

hist_err:
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mutex_unlock(&mdp_hist_mutex);
	printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
	return ret;
}

static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
		|| (mdp_rev == MDP_REV_42 &&
		hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram not started\n", __func__);
		mutex_unlock(&mdp_hist_mutex);
		return -EPERM;
	}
	mutex_unlock(&mdp_hist_mutex);

	INIT_COMPLETION(mdp_hist_comp);
	mdp_hist_frame_cnt = hist->frame_cnt;
	wait_for_completion_killable(&mdp_hist_comp);

	return mdp_copy_hist_data(hist);
}
#endif

/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */

int mdp_ppp_pipe_wait(void)
{
	int ret = 1;

	/* wait 5 seconds for the operation to complete before declaring
	   the MDP hung */

	if (mdp_ppp_waiting == TRUE) {
		ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
								5 * HZ);

		if (!ret)
			printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
				__func__);
	}

	return ret;
}

static DEFINE_SPINLOCK(mdp_lock);
static int mdp_irq_mask;
static int mdp_irq_enabled;
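/*
 * mdp_irq_mask tracks which logical MDP terms currently want the MDP
 * interrupt; the physical IRQ line (mdp_irq) is enabled only on the
 * zero -> non-zero transition of the mask and disabled again once the
 * mask drops back to zero.
 */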

/*
 * mdp_enable_irq: can not be called from isr
 */
void mdp_enable_irq(uint32 term)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (mdp_irq_mask & term) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask |= term;
		if (mdp_irq_mask && !mdp_irq_enabled) {
			mdp_irq_enabled = 1;
			enable_irq(mdp_irq);
		}
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}

/*
 * mdp_disable_irq: can not be called from isr
 */
void mdp_disable_irq(uint32 term)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (!(mdp_irq_mask & term)) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask &= ~term;
		if (!mdp_irq_mask && mdp_irq_enabled) {
			mdp_irq_enabled = 0;
			disable_irq(mdp_irq);
		}
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}

void mdp_disable_irq_nosync(uint32 term)
{
	spin_lock(&mdp_lock);
	if (!(mdp_irq_mask & term)) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask &= ~term;
		if (!mdp_irq_mask && mdp_irq_enabled) {
			mdp_irq_enabled = 0;
			disable_irq_nosync(mdp_irq);
		}
	}
	spin_unlock(&mdp_lock);
}

void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		mipi_dsi_cmd_mdp_start();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
static int mdp_clk_rate;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;

static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
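/*
 * mdp_pipe_ctrl() keeps a per-block power reference count.  Clocks are
 * switched on as soon as any block goes active and are released only
 * once every block count has dropped to zero; from ISR context the
 * actual clock-off is deferred to mdp_pipe_ctrl_wq after
 * mdp_timer_duration.
 */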
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			 * Master has to serve a request to power off MDP always
			 * It also has a timer to power off. So, in case of
			 * timer expires first and DMA2 finishes later,
			 * master has to power off two times
			 * There shouldn't be multiple power-off request for
			 * other blocks
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run. queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}

#ifndef CONFIG_FB_MSM_MDP40
irqreturn_t mdp_isr(int irq, void *ptr)
{
	uint32 mdp_interrupt = 0;
	struct mdp_dma_data *dma;

	mdp_is_in_isr = TRUE;
	do {
		mdp_interrupt = inp32(MDP_INTR_STATUS);
		outp32(MDP_INTR_CLEAR, mdp_interrupt);

		mdp_interrupt &= mdp_intr_mask;

		if (mdp_interrupt & TV_ENC_UNDERRUN) {
			mdp_interrupt &= ~(TV_ENC_UNDERRUN);
			mdp_tv_underflow_cnt++;
		}

		if (!mdp_interrupt)
			break;

		/* DMA3 TV-Out Start */
		if (mdp_interrupt & TV_OUT_DMA3_START) {
			/* let's disable TV out interrupt */
			mdp_intr_mask &= ~TV_OUT_DMA3_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma3_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}
#ifndef CONFIG_FB_MSM_MDP22
		if (mdp_interrupt & MDP_HIST_DONE) {
			outp32(MDP_BASE + 0x94018, 0x3);
			outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
			complete(&mdp_hist_comp);
		}

		/* LCDC UnderFlow */
		if (mdp_interrupt & LCDC_UNDERFLOW) {
			mdp_lcdc_underflow_cnt++;
			/*when underflow happens HW resets all the histogram
			  registers that were set before so restore them back
			  to normal.*/
			MDP_OUTP(MDP_BASE + 0x94010, 1);
			MDP_OUTP(MDP_BASE + 0x9401c, 2);
			if (mdp_is_hist_start == TRUE) {
				MDP_OUTP(MDP_BASE + 0x94004,
						mdp_hist_frame_cnt);
				MDP_OUTP(MDP_BASE + 0x94000, 1);
			}
		}
		/* LCDC Frame Start */
		if (mdp_interrupt & LCDC_FRAME_START) {
			/* let's disable LCDC interrupt */
			mdp_intr_mask &= ~LCDC_FRAME_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma2_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}

		/* DMA_S LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_S_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}
		/* DMA_E LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_E_DONE) {
			dma = &dma_e_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}

#endif

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_P_DONE) {
			struct timeval now;

			mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
				mdp_dma2_last_update_time);
			if (mdp_debug[MDP_DMA2_BLOCK]) {
				jiffies_to_timeval(jiffies, &now);
				mdp_dma2_timeval.tv_usec =
				    now.tv_usec - mdp_dma2_timeval.tv_usec;
			}
#ifndef CONFIG_FB_MSM_MDP303
			dma = &dma2_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
#else
			if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
				dma = &dma2_data;
				dma->busy = FALSE;
				mdp_pipe_ctrl(MDP_DMA2_BLOCK,
					MDP_BLOCK_POWER_OFF, TRUE);
				complete(&dma->comp);
			}
#endif
		}
		/* PPP Complete */
		if (mdp_interrupt & MDP_PPP_DONE) {
#ifdef CONFIG_FB_MSM_MDP31
			MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
#endif
			mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
			if (mdp_ppp_waiting) {
				mdp_ppp_waiting = FALSE;
				complete(&mdp_ppp_comp);
			}
		}
	} while (1);

	mdp_is_in_isr = FALSE;

	return IRQ_HANDLED;
}
#endif

static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	sema_init(&mdp_ppp_mutex, 1);
	sema_init(&mdp_pipe_ctrl_mutex, 1);

	dma2_data.busy = FALSE;
	dma2_data.dmap_busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_completion(&dma2_data.dmap_comp);
	sema_init(&dma2_data.mutex, 1);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	sema_init(&dma3_data.mutex, 1);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	sema_init(&dma_s_data.mutex, 1);

#ifndef CONFIG_FB_MSM_MDP303
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	mutex_init(&dma_e_data.ov_mutex);
#endif

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		atomic_set(&mdp_block_power_cnt[i], 0);
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
#endif
			}
		}
	}
#endif
}

static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);

static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int mdp_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static struct dev_pm_ops mdp_dev_pm_ops = {
	.runtime_suspend = mdp_runtime_suspend,
	.runtime_resume = mdp_runtime_resume,
};


static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.pm = &mdp_dev_pm_ops,
	},
};

static int mdp_off(struct platform_device *pdev)
{
	int ret = 0;
	mdp_histogram_ctrl(FALSE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_off(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}

static int mdp_on(struct platform_device *pdev)
{
	int ret = 0;

#ifdef CONFIG_FB_MSM_MDP40
	struct msm_fb_data_type *mfd;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (is_mdp4_hw_reset()) {
		mfd = platform_get_drvdata(pdev);
		mdp_vsync_cfg_regs(mfd, FALSE);
		mdp4_hw_init();
		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
	mdp_histogram_ctrl(TRUE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_on(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return ret;
}

static int mdp_resource_initialized;
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;

/*
 * mdp_hw_revision:
 * 0 == V1
 * 1 == V2
 * 2 == V2.1
 *
 */
void mdp_hw_version(void)
{
	char *cp;
	uint32 *hp;

	if (mdp_pdata == NULL)
		return;

	mdp_hw_revision = MDP4_REVISION_NONE;
	if (mdp_pdata->hw_revision_addr == 0)
		return;

	/* tlmmgpio2 shadow */
	cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);

	if (cp == NULL)
		return;

	hp = (uint32 *)cp;	/* HW_REVISION_NUMBER */
	mdp_hw_revision = *hp;
	iounmap(cp);

	mdp_hw_revision >>= 28;	/* bit 31:28 */
	mdp_hw_revision &= 0x0f;

	MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
				__func__, mdp_hw_revision);
}

int mdp4_writeback_offset(void)
{
	int off = 0;

	if (mdp_pdata->writeback_offset)
		off = mdp_pdata->writeback_offset();

	pr_debug("%s: writeback_offset=%d %x\n", __func__, off, off);

	return off;
}

#ifdef CONFIG_FB_MSM_MDP40
static void configure_mdp_core_clk_table(uint32 min_clk_rate)
{
	uint8 count;
	uint32 current_rate;
	if (mdp_clk && mdp_pdata
		&& mdp_pdata->mdp_core_clk_table) {
		if (clk_set_min_rate(mdp_clk,
				min_clk_rate) < 0)
			printk(KERN_ERR "%s: clk_set_min_rate failed\n",
				__func__);
		else {
			count = 0;
			current_rate = clk_get_rate(mdp_clk);
			while (count < mdp_pdata->num_mdp_clk) {
				if (mdp_pdata->mdp_core_clk_table[count]
						< current_rate) {
					mdp_pdata->
					mdp_core_clk_table[count] =
							current_rate;
				}
				count++;
			}
		}
	}
}
#endif

#ifdef CONFIG_MSM_BUS_SCALING
static uint32_t mdp_bus_scale_handle;
int mdp_bus_scale_update_request(uint32_t index)
{
	if (!mdp_pdata || !mdp_pdata->mdp_bus_scale_table
	    || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1)) {
		printk(KERN_ERR "%s invalid table or index\n", __func__);
		return -EINVAL;
	}
	if (mdp_bus_scale_handle < 1) {
		printk(KERN_ERR "%s invalid bus handle\n", __func__);
		return -EINVAL;
	}
	return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
							index);
}
#endif
DEFINE_MUTEX(mdp_clk_lock);
int mdp_set_core_clk(uint16 perf_level)
{
	int ret = -EINVAL;
	if (mdp_clk && mdp_pdata
		&& mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk)
			printk(KERN_ERR "%s invalid perf level\n", __func__);
		else {
			mutex_lock(&mdp_clk_lock);
			ret = clk_set_rate(mdp_clk,
				mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
						- perf_level]);
			mutex_unlock(&mdp_clk_lock);
			if (ret) {
				printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
					__func__);
			}
		}
	}
	return ret;
}

unsigned long mdp_get_core_clk(void)
{
	unsigned long clk_rate = 0;
	if (mdp_clk) {
		mutex_lock(&mdp_clk_lock);
		clk_rate = clk_get_rate(mdp_clk);
		mutex_unlock(&mdp_clk_lock);
	}

	return clk_rate;
}

unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
{
	unsigned long clk_rate = 0;

	if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk) {
			printk(KERN_ERR "%s invalid perf level\n", __func__);
			clk_rate = mdp_get_core_clk();
		} else {
			clk_rate = mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
						- perf_level];
		}
	} else
		clk_rate = mdp_get_core_clk();

	return clk_rate;
}

static int mdp_irq_clk_setup(void)
{
	int ret;

#ifdef CONFIG_FB_MSM_MDP40
	ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
#else
	ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
#endif
	if (ret) {
		printk(KERN_ERR "mdp request_irq() failed!\n");
		return ret;
	}
	disable_irq(mdp_irq);

	footswitch = regulator_get(NULL, "fs_mdp");
	if (IS_ERR(footswitch))
		footswitch = NULL;
	else
		regulator_enable(footswitch);

	mdp_clk = clk_get(NULL, "mdp_clk");
	if (IS_ERR(mdp_clk)) {
		ret = PTR_ERR(mdp_clk);
		printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
		free_irq(mdp_irq, 0);
		return ret;
	}

	mdp_pclk = clk_get(NULL, "mdp_pclk");
	if (IS_ERR(mdp_pclk))
		mdp_pclk = NULL;

	if (mdp_rev == MDP_REV_42) {
		mdp_lut_clk = clk_get(NULL, "lut_mdp");
		if (IS_ERR(mdp_lut_clk)) {
			ret = PTR_ERR(mdp_lut_clk);
			pr_err("can't get mdp_lut_clk error:%d!\n", ret);
			clk_put(mdp_clk);
			free_irq(mdp_irq, 0);
			return ret;
		}
	} else {
		mdp_lut_clk = NULL;
	}

#ifdef CONFIG_FB_MSM_MDP40
	/*
	 * mdp_clk should always be greater than mdp_pclk
	 */
	if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
		mutex_lock(&mdp_clk_lock);
		clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
		if (mdp_lut_clk != NULL)
			clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
		mutex_unlock(&mdp_clk_lock);
	}
	MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
#endif
	return 0;
}

static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size = resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		if (mdp_irq < 0) {
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif

		mdp_resource_initialized = 1;
		return 0;
	}

	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		if (mdp_rev >= MDP_REV_40)
			mfd->cursor_update = mdp_hw_cursor_sync_update;
		else
			mfd->cursor_update = mdp_hw_cursor_update;
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mfd->lut_update = mdp_lut_update_nonlcdc;
		mfd->do_histogram = mdp_do_histogram;
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
								* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif
	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}

#ifdef CONFIG_PM
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* in case the worker can't be cancelled, flush it */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (pdev->id == 0) {
		mdp_suspend_sub();
		if (mdp_current_clk_on) {
			printk(KERN_WARNING "MDP suspend failed\n");
			return -EBUSY;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_HAS_EARLYSUSPEND
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
	mdp4_dtv_set_black_screen();
#endif
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_disable(footswitch);
}

static void mdp_early_resume(struct early_suspend *h)
{
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_enable(footswitch);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif

static int mdp_remove(struct platform_device *pdev)
{
	if (footswitch != NULL)
		regulator_put(footswitch);
	iounmap(msm_mdp_base);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return 0;
}

static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}

static int __init mdp_driver_init(void)
{
	int ret;

	mdp_drv_init();

	ret = mdp_register_driver();
	if (ret) {
		printk(KERN_ERR "mdp_register_driver() failed!\n");
		return ret;
	}

#if defined(CONFIG_DEBUG_FS)
	mdp_debugfs_init();
#endif

	return 0;

}

module_init(mdp_driver_init);