blob: aeeb503fd4a50c9b62af259195e92f54c8918492 [file] [log] [blame]
Pavel Machekd480ace2009-09-22 16:47:03 -07001/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
Pavel Machekd480ace2009-09-22 16:47:03 -07006 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/module.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070019#include <linux/kernel.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/sched.h>
21#include <linux/time.h>
22#include <linux/init.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070023#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <linux/spinlock.h>
25#include <linux/hrtimer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070026#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/hardware.h>
28#include <linux/io.h>
29#include <linux/debugfs.h>
30#include <linux/delay.h>
31#include <linux/mutex.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <asm/system.h>
36#include <asm/mach-types.h>
37#include <linux/semaphore.h>
38#include <linux/uaccess.h>
39#include <mach/clk.h>
40#include "mdp.h"
41#include "msm_fb.h"
42#ifdef CONFIG_FB_MSM_MDP40
43#include "mdp4.h"
44#endif
45#include "mipi_dsi.h"
Pavel Machekd480ace2009-09-22 16:47:03 -070046
/* Secondary/external display usage counter (shared with mdp4 code). */
uint32 mdp4_extn_disp;

/* MDP core, pixel, and LUT clock handles; NULL until mdp_probe acquires them. */
static struct clk *mdp_clk;
static struct clk *mdp_pclk;
static struct clk *mdp_lut_clk;
int mdp_rev;

static struct regulator *footswitch;

struct completion mdp_ppp_comp;
struct semaphore mdp_ppp_mutex;
struct semaphore mdp_pipe_ctrl_mutex;

unsigned long mdp_timer_duration = (HZ/20); /* 50 msecond */

boolean mdp_ppp_waiting = FALSE;
uint32 mdp_tv_underflow_cnt;
uint32 mdp_lcdc_underflow_cnt;

/* TRUE while the MDP clocks are enabled (see mdp_pipe_ctrl). */
boolean mdp_current_clk_on = FALSE;
boolean mdp_is_in_isr = FALSE;

/*
 * legacy mdp_in_processing is only for DMA2-MDDI
 * this applies to DMA2 block only
 */
uint32 mdp_in_processing = FALSE;

#ifdef CONFIG_FB_MSM_MDP40
uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
#else
uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
#endif

MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];

/* Per-block power reference counts, managed by mdp_pipe_ctrl(). */
atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];

spinlock_t mdp_spin_lock;
struct workqueue_struct *mdp_dma_wq; /*mdp dma wq */
struct workqueue_struct *mdp_vsync_wq; /*mdp vsync wq */

static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
static struct delayed_work mdp_pipe_ctrl_worker;

static boolean mdp_suspended = FALSE;
DEFINE_MUTEX(mdp_suspend_mutex);

/* DMA channel state; MDP4 exposes them to mdp4_* code, older MDPs keep
 * them file-local. */
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
struct mdp_dma_data dma_s_data;
struct mdp_dma_data dma_e_data;
ulong mdp4_display_intf;
#else
static struct mdp_dma_data dma2_data;
static struct mdp_dma_data dma_s_data;
#ifndef CONFIG_FB_MSM_MDP303
static struct mdp_dma_data dma_e_data;
#endif
#endif
static struct mdp_dma_data dma3_data;

extern ktime_t mdp_dma2_last_update_time;

extern uint32 mdp_dma2_update_time_in_usec;
extern int mdp_lcd_rd_cnt_offset_slow;
extern int mdp_lcd_rd_cnt_offset_fast;
extern int mdp_usec_diff_threshold;

#ifdef CONFIG_FB_MSM_LCDC
extern int first_pixel_start_x;
extern int first_pixel_start_y;
#endif

#ifdef MSM_FB_ENABLE_DBGFS
struct dentry *mdp_dir;
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
#else
#define mdp_suspend NULL
#endif

struct timeval mdp_dma2_timeval;
struct timeval mdp_ppp_timeval;

#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend early_suspend;
#endif

static u32 mdp_irq;

static uint32 mdp_prim_panel_type = NO_PANEL;
#ifndef CONFIG_FB_MSM_MDP22
/* Protects the LUT "push" flag/index pair below. */
DEFINE_MUTEX(mdp_lut_push_sem);
/* Index (0 or 1) of the LUT bank that will be programmed next. */
static int mdp_lut_i;
144static int mdp_lut_hw_update(struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700145{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 int i;
147 u16 *c[3];
148 u16 r, g, b;
Pavel Machekd480ace2009-09-22 16:47:03 -0700149
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700150 c[0] = cmap->green;
151 c[1] = cmap->blue;
152 c[2] = cmap->red;
Pavel Machekd480ace2009-09-22 16:47:03 -0700153
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700154 for (i = 0; i < cmap->len; i++) {
155 if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
156 copy_from_user(&g, cmap->green++, sizeof(g)) ||
157 copy_from_user(&b, cmap->blue++, sizeof(b)))
158 return -EFAULT;
159
160#ifdef CONFIG_FB_MSM_MDP40
161 MDP_OUTP(MDP_BASE + 0x94800 +
162#else
163 MDP_OUTP(MDP_BASE + 0x93800 +
164#endif
165 (0x400*mdp_lut_i) + cmap->start*4 + i*4,
166 ((g & 0xff) |
167 ((b & 0xff) << 8) |
168 ((r & 0xff) << 16)));
Pavel Machekd480ace2009-09-22 16:47:03 -0700169 }
170
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700172}
173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174static int mdp_lut_push;
175static int mdp_lut_push_i;
176static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 int ret;
179
180 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
181 ret = mdp_lut_hw_update(cmap);
182 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
183
184 if (ret)
185 return ret;
186
187 mutex_lock(&mdp_lut_push_sem);
188 mdp_lut_push = 1;
189 mdp_lut_push_i = mdp_lut_i;
190 mutex_unlock(&mdp_lut_push_sem);
191
192 mdp_lut_i = (mdp_lut_i + 1)%2;
193
194 return 0;
195}
196
197static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
198{
199 int ret;
200
201 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
202 ret = mdp_lut_hw_update(cmap);
203
204 if (ret) {
205 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
206 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -0700207 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208
209 MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
210 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
211 mdp_lut_i = (mdp_lut_i + 1)%2;
212
213 return 0;
214}
215
216static void mdp_lut_enable(void)
217{
218 if (mdp_lut_push) {
219 mutex_lock(&mdp_lut_push_sem);
220 mdp_lut_push = 0;
221 MDP_OUTP(MDP_BASE + 0x90070,
222 (mdp_lut_push_i << 10) | 0x17);
223 mutex_unlock(&mdp_lut_push_sem);
224 }
225}
226
/* Maximum histogram bin counts supported by each MDP revision. */
#define MDP_REV42_HIST_MAX_BIN 128
#define MDP_REV41_HIST_MAX_BIN 32

/* Histogram collection state; MDP4 shares it with mdp4_* code. */
#ifdef CONFIG_FB_MSM_MDP40
unsigned int mdp_hist_frame_cnt;
struct completion mdp_hist_comp;
boolean mdp_is_hist_start = FALSE;
#else
static unsigned int mdp_hist_frame_cnt;
static struct completion mdp_hist_comp;
static boolean mdp_is_hist_start = FALSE;
#endif
/* Serializes histogram start/stop/read against each other. */
static DEFINE_MUTEX(mdp_hist_mutex);
240
/*
 * mdp_histogram_ctrl() - enable or disable histogram collection in HW.
 *
 * @en: TRUE to arm the histogram block for one frame and enable its IRQ,
 *      FALSE to mask/clear the histogram interrupt and release the IRQ.
 *
 * The histogram register block lives at 0x95000 on MDP >= 4.0 and at
 * 0x94000 on older revisions.  Always returns 0.
 */
int mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	unsigned long hist_base;
	uint32_t status;

	if (mdp_rev >= MDP_REV_40)
		hist_base = 0x95000;
	else
		hist_base = 0x94000;

	if (en == TRUE) {
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_hist_frame_cnt = 1;
		mdp_enable_irq(MDP_HISTOGRAM_TERM);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		/* first start on MDP4+: clear and unmask HIST_DONE */
		if (mdp_is_hist_start == FALSE && mdp_rev >= MDP_REV_40) {
			MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
			MDP_OUTP(MDP_BASE + hist_base + 0x1c, INTR_HIST_DONE);
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* program frame count and kick off collection */
		MDP_OUTP(MDP_BASE + hist_base + 0x4, mdp_hist_frame_cnt);
		MDP_OUTP(MDP_BASE + hist_base, 1);
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	} else {
		if (mdp_rev >= MDP_REV_40) {
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			/* mask out HIST_DONE, then clear any pending one */
			status = inpdw(MDP_BASE + hist_base + 0x1C);
			status &= ~INTR_HIST_DONE;
			MDP_OUTP(MDP_BASE + hist_base + 0x1C, status);

			MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE);
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
				 FALSE);
		}

		mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}

	return 0;
}
282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283int mdp_start_histogram(struct fb_info *info)
Pavel Machekd480ace2009-09-22 16:47:03 -0700284{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285 unsigned long flag;
Pavel Machekd480ace2009-09-22 16:47:03 -0700286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287 int ret = 0;
288 mutex_lock(&mdp_hist_mutex);
289 if (mdp_is_hist_start == TRUE) {
290 printk(KERN_ERR "%s histogram already started\n", __func__);
291 ret = -EPERM;
292 goto mdp_hist_start_err;
293 }
294
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700295 ret = mdp_histogram_ctrl(TRUE);
296
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700297 spin_lock_irqsave(&mdp_spin_lock, flag);
298 mdp_is_hist_start = TRUE;
299 spin_unlock_irqrestore(&mdp_spin_lock, flag);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300
301mdp_hist_start_err:
302 mutex_unlock(&mdp_hist_mutex);
303 return ret;
304
305}
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700306
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700307int mdp_stop_histogram(struct fb_info *info)
308{
309 unsigned long flag;
310 int ret = 0;
311 mutex_lock(&mdp_hist_mutex);
312 if (!mdp_is_hist_start) {
313 printk(KERN_ERR "%s histogram already stopped\n", __func__);
314 ret = -EPERM;
315 goto mdp_hist_stop_err;
316 }
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318 spin_lock_irqsave(&mdp_spin_lock, flag);
319 mdp_is_hist_start = FALSE;
320 spin_unlock_irqrestore(&mdp_spin_lock, flag);
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700321
322 ret = mdp_histogram_ctrl(FALSE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700323
324mdp_hist_stop_err:
325 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700326 return ret;
327}
328
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700329static int mdp_copy_hist_data(struct mdp_histogram *hist)
Pavel Machekd480ace2009-09-22 16:47:03 -0700330{
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700331 char *mdp_hist_base;
332 uint32 r_data_offset = 0x100, g_data_offset = 0x200;
333 uint32 b_data_offset = 0x300;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334 int ret = 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700335
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700336 mutex_lock(&mdp_hist_mutex);
337 if (mdp_rev >= MDP_REV_42) {
338 mdp_hist_base = MDP_BASE + 0x95000;
339 r_data_offset = 0x400;
340 g_data_offset = 0x800;
341 b_data_offset = 0xc00;
342 } else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_41) {
343 mdp_hist_base = MDP_BASE + 0x95000;
344 } else if (mdp_rev >= MDP_REV_30 && mdp_rev <= MDP_REV_31) {
345 mdp_hist_base = MDP_BASE + 0x94000;
346 } else {
347 pr_err("%s(): Unsupported MDP rev %u\n", __func__, mdp_rev);
348 return -EPERM;
349 }
350
351 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
352 if (hist->r) {
353 ret = copy_to_user(hist->r, mdp_hist_base + r_data_offset,
354 hist->bin_cnt * 4);
355 if (ret)
356 goto hist_err;
357 }
358 if (hist->g) {
359 ret = copy_to_user(hist->g, mdp_hist_base + g_data_offset,
360 hist->bin_cnt * 4);
361 if (ret)
362 goto hist_err;
363 }
364 if (hist->b) {
365 ret = copy_to_user(hist->b, mdp_hist_base + b_data_offset,
366 hist->bin_cnt * 4);
367 if (ret)
368 goto hist_err;
369 }
370
371 if (mdp_is_hist_start == TRUE) {
372 MDP_OUTP(mdp_hist_base + 0x004,
373 mdp_hist_frame_cnt);
374 MDP_OUTP(mdp_hist_base, 1);
375 }
376 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
377 mutex_unlock(&mdp_hist_mutex);
378 return 0;
379
380hist_err:
381 printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
382 return ret;
383}
384
/*
 * mdp_do_histogram() - validate @hist, wait for one histogram collection
 * to complete, then copy the bins out to user space.
 *
 * Returns -EINVAL for a zero frame/bin count or a bin count beyond the
 * revision's maximum, -EPERM if collection was never started, otherwise
 * the result of mdp_copy_hist_data().
 *
 * NOTE(review): mdp_hist_mutex is dropped before the completion wait, so
 * collection could be stopped between the check and the wait — looks
 * like a TOCTOU window; confirm whether mdp_stop_histogram() can leave
 * this thread blocked on mdp_hist_comp.
 */
static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	/* bin count must fit the HW: 32 bins up to rev 4.1, 128 on 4.2 */
	if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
		|| (mdp_rev == MDP_REV_42 &&
				hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram not started\n", __func__);
		mutex_unlock(&mdp_hist_mutex);
		return -EPERM;
	}
	mutex_unlock(&mdp_hist_mutex);

	/* wait for the ISR to signal HIST_DONE (see mdp_isr) */
	INIT_COMPLETION(mdp_hist_comp);
	mdp_hist_frame_cnt = hist->frame_cnt;
	wait_for_completion_killable(&mdp_hist_comp);

	return mdp_copy_hist_data(hist);
}
409#endif
410
411/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
412
413int mdp_ppp_pipe_wait(void)
414{
415 int ret = 1;
416
417 /* wait 5 seconds for the operation to complete before declaring
418 the MDP hung */
419
420 if (mdp_ppp_waiting == TRUE) {
421 ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
422 5 * HZ);
423
424 if (!ret)
425 printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
426 __func__);
Pavel Machekd480ace2009-09-22 16:47:03 -0700427 }
428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429 return ret;
430}
Pavel Machekd480ace2009-09-22 16:47:03 -0700431
/* Protects mdp_irq_mask/mdp_irq_enabled in the IRQ ref-count helpers. */
static DEFINE_SPINLOCK(mdp_lock);
/* Bitmask of terms currently holding the MDP interrupt enabled. */
static int mdp_irq_mask;
/* 1 while enable_irq(mdp_irq) is in effect. */
static int mdp_irq_enabled;
Pavel Machekd480ace2009-09-22 16:47:03 -0700435
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700436/*
437 * mdp_enable_irq: can not be called from isr
438 */
439void mdp_enable_irq(uint32 term)
440{
441 unsigned long irq_flags;
442
443 spin_lock_irqsave(&mdp_lock, irq_flags);
444 if (mdp_irq_mask & term) {
445 printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
446 __func__, term, mdp_irq_mask, mdp_irq_enabled);
447 } else {
448 mdp_irq_mask |= term;
449 if (mdp_irq_mask && !mdp_irq_enabled) {
450 mdp_irq_enabled = 1;
451 enable_irq(mdp_irq);
452 }
453 }
Pavel Machekd480ace2009-09-22 16:47:03 -0700454 spin_unlock_irqrestore(&mdp_lock, irq_flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455}
456
457/*
458 * mdp_disable_irq: can not be called from isr
459 */
460void mdp_disable_irq(uint32 term)
461{
462 unsigned long irq_flags;
463
464 spin_lock_irqsave(&mdp_lock, irq_flags);
465 if (!(mdp_irq_mask & term)) {
466 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
467 __func__, term, mdp_irq_mask, mdp_irq_enabled);
468 } else {
469 mdp_irq_mask &= ~term;
470 if (!mdp_irq_mask && mdp_irq_enabled) {
471 mdp_irq_enabled = 0;
472 disable_irq(mdp_irq);
473 }
474 }
475 spin_unlock_irqrestore(&mdp_lock, irq_flags);
476}
477
478void mdp_disable_irq_nosync(uint32 term)
479{
480 spin_lock(&mdp_lock);
481 if (!(mdp_irq_mask & term)) {
482 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
483 __func__, term, mdp_irq_mask, mdp_irq_enabled);
484 } else {
485 mdp_irq_mask &= ~term;
486 if (!mdp_irq_mask && mdp_irq_enabled) {
487 mdp_irq_enabled = 0;
488 disable_irq_nosync(mdp_irq);
489 }
490 }
491 spin_unlock(&mdp_lock);
492}
493
/*
 * mdp_pipe_kickoff() - start the HW engine selected by @term.
 *
 * PPP kickoffs are synchronous: the PPP block is powered on, its IRQ
 * enabled, the engine started, and the caller blocks until the ISR
 * completes mdp_ppp_comp.  DMA/overlay kickoffs only write the start
 * register (the matching power-off happens in the done ISR).  The
 * register offsets differ per MDP generation, hence the #ifdef ladder.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* a staged LUT bank (if any) is activated with this frame */
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		mipi_dsi_cmd_mdp_start();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
/* MDP core clock rate saved across a clock-off (see mdp_pipe_ctrl). */
static int mdp_clk_rate;
/* Registered panel platform devices whose clk_func is toggled with MDP. */
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
584
/* Deferred power-off: runs mdp_timer_duration after the last block went
 * idle and asks the master block to drop the MDP clocks. */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp_pipe_ctrl() - reference-counted power management for MDP blocks.
 *
 * @block: which HW block's power count to adjust.
 * @state: MDP_BLOCK_POWER_ON to take a reference, _OFF to drop one.
 * @isr:   TRUE when called from interrupt context; then the actual
 *         clock work is deferred to mdp_pipe_ctrl_wq instead of being
 *         done inline.
 *
 * When every block's count reaches zero the MDP/pixel/LUT clocks (and
 * each panel's clk_func) are turned off — immediately for the master
 * block or during suspend, otherwise via the delayed worker.  When a
 * count rises from all-zero, the clocks are turned back on inline.
 */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			* Master has to serve a request to power off MDP always
			* It also has a timer to power off.  So, in case of
			* timer expires first and DMA2 finishes later,
			* master has to power off two times
			* There shouldn't be multiple power-off request for
			* other blocks
			*/
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run.  queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					/* remember the rate so it can be
					 * restored when clocks come back */
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
750
751#ifndef CONFIG_FB_MSM_MDP40
752irqreturn_t mdp_isr(int irq, void *ptr)
753{
754 uint32 mdp_interrupt = 0;
755 struct mdp_dma_data *dma;
756
757 mdp_is_in_isr = TRUE;
758 do {
759 mdp_interrupt = inp32(MDP_INTR_STATUS);
760 outp32(MDP_INTR_CLEAR, mdp_interrupt);
761
762 mdp_interrupt &= mdp_intr_mask;
763
764 if (mdp_interrupt & TV_ENC_UNDERRUN) {
765 mdp_interrupt &= ~(TV_ENC_UNDERRUN);
766 mdp_tv_underflow_cnt++;
767 }
768
769 if (!mdp_interrupt)
770 break;
771
772 /* DMA3 TV-Out Start */
773 if (mdp_interrupt & TV_OUT_DMA3_START) {
774 /* let's disable TV out interrupt */
775 mdp_intr_mask &= ~TV_OUT_DMA3_START;
776 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
777
778 dma = &dma3_data;
779 if (dma->waiting) {
780 dma->waiting = FALSE;
781 complete(&dma->comp);
782 }
783 }
784#ifndef CONFIG_FB_MSM_MDP22
785 if (mdp_interrupt & MDP_HIST_DONE) {
786 outp32(MDP_BASE + 0x94018, 0x3);
787 outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700788 complete(&mdp_hist_comp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700789 }
790
791 /* LCDC UnderFlow */
792 if (mdp_interrupt & LCDC_UNDERFLOW) {
793 mdp_lcdc_underflow_cnt++;
794 /*when underflow happens HW resets all the histogram
795 registers that were set before so restore them back
796 to normal.*/
797 MDP_OUTP(MDP_BASE + 0x94010, 1);
798 MDP_OUTP(MDP_BASE + 0x9401c, 2);
799 if (mdp_is_hist_start == TRUE) {
800 MDP_OUTP(MDP_BASE + 0x94004,
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700801 mdp_hist_frame_cnt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700802 MDP_OUTP(MDP_BASE + 0x94000, 1);
803 }
804 }
805 /* LCDC Frame Start */
806 if (mdp_interrupt & LCDC_FRAME_START) {
807 /* let's disable LCDC interrupt */
808 mdp_intr_mask &= ~LCDC_FRAME_START;
809 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
810
811 dma = &dma2_data;
812 if (dma->waiting) {
813 dma->waiting = FALSE;
814 complete(&dma->comp);
815 }
816 }
817
818 /* DMA2 LCD-Out Complete */
819 if (mdp_interrupt & MDP_DMA_S_DONE) {
820 dma = &dma_s_data;
821 dma->busy = FALSE;
822 mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
823 TRUE);
824 complete(&dma->comp);
825 }
826 /* DMA_E LCD-Out Complete */
827 if (mdp_interrupt & MDP_DMA_E_DONE) {
828 dma = &dma_s_data;
829 dma->busy = FALSE;
830 mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
831 TRUE);
832 complete(&dma->comp);
833 }
834
835#endif
836
837 /* DMA2 LCD-Out Complete */
838 if (mdp_interrupt & MDP_DMA_P_DONE) {
839 struct timeval now;
840
841 mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
842 mdp_dma2_last_update_time);
843 if (mdp_debug[MDP_DMA2_BLOCK]) {
844 jiffies_to_timeval(jiffies, &now);
845 mdp_dma2_timeval.tv_usec =
846 now.tv_usec - mdp_dma2_timeval.tv_usec;
847 }
848#ifndef CONFIG_FB_MSM_MDP303
849 dma = &dma2_data;
850 dma->busy = FALSE;
851 mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
852 TRUE);
853 complete(&dma->comp);
854#else
855 if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
856 dma = &dma2_data;
857 dma->busy = FALSE;
858 mdp_pipe_ctrl(MDP_DMA2_BLOCK,
859 MDP_BLOCK_POWER_OFF, TRUE);
860 complete(&dma->comp);
861 }
862#endif
863 }
864 /* PPP Complete */
865 if (mdp_interrupt & MDP_PPP_DONE) {
866#ifdef CONFIG_FB_MSM_MDP31
867 MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
868#endif
869 mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
870 if (mdp_ppp_waiting) {
871 mdp_ppp_waiting = FALSE;
872 complete(&mdp_ppp_comp);
873 }
874 }
875 } while (1);
876
877 mdp_is_in_isr = FALSE;
878
Pavel Machekd480ace2009-09-22 16:47:03 -0700879 return IRQ_HANDLED;
880}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700882
/*
 * mdp_drv_init() - one-time driver state setup: locks, workqueues,
 * completions, per-DMA-channel state, power counters, and (optionally)
 * the debugfs tuning files.  Called before any MDP operation runs.
 */
static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	sema_init(&mdp_ppp_mutex, 1);
	sema_init(&mdp_pipe_ctrl_mutex, 1);

	dma2_data.busy = FALSE;
	dma2_data.dmap_busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_completion(&dma2_data.dmap_comp);
	sema_init(&dma2_data.mutex, 1);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	sema_init(&dma3_data.mutex, 1);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	sema_init(&dma_s_data.mutex, 1);

#ifndef CONFIG_FB_MSM_MDP303
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	mutex_init(&dma_e_data.ov_mutex);
#endif

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		atomic_set(&mdp_block_power_cnt[i], 0);
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
#endif
			}
		}
	}
#endif
}
976
/* Platform-driver entry points; definitions appear later in this file. */
static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);
979
/*
 * Runtime-PM suspend callback.  A no-op beyond the debug trace; MDP
 * power gating is driven elsewhere (via mdp_pipe_ctrl calls in the
 * on/off paths), so there is nothing to do here.
 */
static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
985
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986static int mdp_runtime_resume(struct device *dev)
Pavel Machekd480ace2009-09-22 16:47:03 -0700987{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988 dev_dbg(dev, "pm_runtime: resuming...\n");
Pavel Machekd480ace2009-09-22 16:47:03 -0700989 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700990}
991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700992static struct dev_pm_ops mdp_dev_pm_ops = {
993 .runtime_suspend = mdp_runtime_suspend,
994 .runtime_resume = mdp_runtime_resume,
Pavel Machekd480ace2009-09-22 16:47:03 -0700995};
996
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700997
998static struct platform_driver mdp_driver = {
999 .probe = mdp_probe,
1000 .remove = mdp_remove,
1001#ifndef CONFIG_HAS_EARLYSUSPEND
1002 .suspend = mdp_suspend,
1003 .resume = NULL,
1004#endif
1005 .shutdown = NULL,
1006 .driver = {
1007 /*
1008 * Driver name must match the device name added in
1009 * platform.c.
1010 */
1011 .name = "mdp",
1012 .pm = &mdp_dev_pm_ops,
1013 },
1014};
1015
/*
 * Panel-off handler installed as pdata->off in mdp_probe().
 * Disables histogram processing first, then runs the next panel's
 * off hook with the MDP command block held powered.
 */
static int mdp_off(struct platform_device *pdev)
{
	int ret = 0;
	mdp_histogram_ctrl(FALSE);

	/* keep MDP powered while the downstream panel shuts down */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_off(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}
1027
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001028static int mdp_on(struct platform_device *pdev)
1029{
1030 int ret = 0;
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001031
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001032#ifdef CONFIG_FB_MSM_MDP40
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001033 struct msm_fb_data_type *mfd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001034 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1035 if (is_mdp4_hw_reset()) {
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001036 mfd = platform_get_drvdata(pdev);
1037 mdp_vsync_cfg_regs(mfd, FALSE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001038 mdp4_hw_init();
1039 outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
1040 }
1041 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1042#endif
1043 mdp_histogram_ctrl(TRUE);
1044
1045 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1046 ret = panel_next_on(pdev);
1047 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1048 return ret;
1049}
1050
/* set once the MDP core device (id 0) has mapped its registers,
 * claimed its IRQ and acquired its clocks; panel probes bail out
 * with -EPERM until then */
static int mdp_resource_initialized;
/* board platform data; may be NULL on some targets — callers must check */
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;
1055
1056/*
1057 * mdp_hw_revision:
1058 * 0 == V1
1059 * 1 == V2
1060 * 2 == V2.1
1061 *
1062 */
1063void mdp_hw_version(void)
1064{
1065 char *cp;
1066 uint32 *hp;
1067
1068 if (mdp_pdata == NULL)
1069 return;
1070
1071 mdp_hw_revision = MDP4_REVISION_NONE;
1072 if (mdp_pdata->hw_revision_addr == 0)
1073 return;
1074
1075 /* tlmmgpio2 shadow */
1076 cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);
1077
1078 if (cp == NULL)
1079 return;
1080
1081 hp = (uint32 *)cp; /* HW_REVISION_NUMBER */
1082 mdp_hw_revision = *hp;
1083 iounmap(cp);
1084
1085 mdp_hw_revision >>= 28; /* bit 31:28 */
1086 mdp_hw_revision &= 0x0f;
1087
1088 MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
1089 __func__, mdp_hw_revision);
1090}
1091
kuogee hsieh5a7f32c2011-08-31 17:51:34 -07001092int mdp4_writeback_offset(void)
1093{
1094 int off = 0;
1095
1096 if (mdp_pdata->writeback_offset)
1097 off = mdp_pdata->writeback_offset();
1098
1099 pr_debug("%s: writeback_offset=%d %x\n", __func__, off, off);
1100
1101 return off;
1102}
1103
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104#ifdef CONFIG_FB_MSM_MDP40
/*
 * Raise the minimum MDP core clock to min_clk_rate, then bump every
 * entry of the board's core-clock table that is below the rate the
 * clock actually settled at, so later perf-level lookups never select
 * a rate under the established floor.  Silently does nothing when the
 * clock or the table is absent.
 */
static void configure_mdp_core_clk_table(uint32 min_clk_rate)
{
	uint8 count;
	uint32 current_rate;
	if (mdp_clk && mdp_pdata
		 && mdp_pdata->mdp_core_clk_table) {
		if (clk_set_min_rate(mdp_clk,
				min_clk_rate) < 0)
			printk(KERN_ERR "%s: clk_set_min_rate failed\n",
				__func__);
		else {
			count = 0;
			/* the rate granted may exceed what was asked for */
			current_rate = clk_get_rate(mdp_clk);
			while (count < mdp_pdata->num_mdp_clk) {
				if (mdp_pdata->mdp_core_clk_table[count]
						< current_rate) {
					mdp_pdata->
					mdp_core_clk_table[count] =
							current_rate;
				}
				count++;
			}
		}
	}
}
1130#endif
1131
1132#ifdef CONFIG_MSM_BUS_SCALING
1133static uint32_t mdp_bus_scale_handle;
1134int mdp_bus_scale_update_request(uint32_t index)
1135{
1136 if (!mdp_pdata && (!mdp_pdata->mdp_bus_scale_table
1137 || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1))) {
1138 printk(KERN_ERR "%s invalid table or index\n", __func__);
1139 return -EINVAL;
1140 }
1141 if (mdp_bus_scale_handle < 1) {
1142 printk(KERN_ERR "%s invalid bus handle\n", __func__);
1143 return -EINVAL;
1144 }
1145 return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
1146 index);
1147}
1148#endif
1149DEFINE_MUTEX(mdp_clk_lock);
1150int mdp_set_core_clk(uint16 perf_level)
1151{
1152 int ret = -EINVAL;
1153 if (mdp_clk && mdp_pdata
1154 && mdp_pdata->mdp_core_clk_table) {
1155 if (perf_level > mdp_pdata->num_mdp_clk)
1156 printk(KERN_ERR "%s invalid perf level\n", __func__);
1157 else {
1158 mutex_lock(&mdp_clk_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001159 ret = clk_set_rate(mdp_clk,
1160 mdp_pdata->
1161 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1162 - perf_level]);
1163 mutex_unlock(&mdp_clk_lock);
1164 if (ret) {
1165 printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
1166 __func__);
1167 }
1168 }
1169 }
1170 return ret;
1171}
1172
1173unsigned long mdp_get_core_clk(void)
1174{
1175 unsigned long clk_rate = 0;
1176 if (mdp_clk) {
1177 mutex_lock(&mdp_clk_lock);
1178 clk_rate = clk_get_rate(mdp_clk);
1179 mutex_unlock(&mdp_clk_lock);
1180 }
1181
1182 return clk_rate;
1183}
1184
1185unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
1186{
1187 unsigned long clk_rate = 0;
1188
1189 if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
1190 if (perf_level > mdp_pdata->num_mdp_clk) {
1191 printk(KERN_ERR "%s invalid perf level\n", __func__);
1192 clk_rate = mdp_get_core_clk();
1193 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001194 clk_rate = mdp_pdata->
1195 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1196 - perf_level];
1197 }
1198 } else
1199 clk_rate = mdp_get_core_clk();
1200
1201 return clk_rate;
1202}
1203
/*
 * Claim the MDP interrupt, footswitch regulator and clocks.
 * Called once from mdp_probe() for the core device (id 0).
 *
 * Acquisition order: IRQ -> footswitch -> mdp_clk -> mdp_pclk ->
 * (rev 4.2 only) mdp_lut_clk.  On failure, everything acquired so far
 * is released before returning the error.  The footswitch and
 * mdp_pclk are optional: a failed lookup leaves them NULL.
 */
static int mdp_irq_clk_setup(void)
{
	int ret;

#ifdef CONFIG_FB_MSM_MDP40
	ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
#else
	ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
#endif
	if (ret) {
		printk(KERN_ERR "mdp request_irq() failed!\n");
		return ret;
	}
	/* leave the line masked until the display path enables it */
	disable_irq(mdp_irq);

	footswitch = regulator_get(NULL, "fs_mdp");
	if (IS_ERR(footswitch))
		footswitch = NULL;
	else
		/* NOTE(review): regulator_enable() return value is ignored
		 * here — presumably acceptable at probe time; confirm */
		regulator_enable(footswitch);

	mdp_clk = clk_get(NULL, "mdp_clk");
	if (IS_ERR(mdp_clk)) {
		ret = PTR_ERR(mdp_clk);
		printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
		free_irq(mdp_irq, 0);
		return ret;
	}

	/* pclk is optional on some targets */
	mdp_pclk = clk_get(NULL, "mdp_pclk");
	if (IS_ERR(mdp_pclk))
		mdp_pclk = NULL;

	if (mdp_rev == MDP_REV_42) {
		/* LUT clock exists only on rev 4.2 hardware */
		mdp_lut_clk = clk_get(NULL, "lut_mdp");
		if (IS_ERR(mdp_lut_clk)) {
			ret = PTR_ERR(mdp_lut_clk);
			pr_err("can't get mdp_clk error:%d!\n", ret);
			clk_put(mdp_clk);
			free_irq(mdp_irq, 0);
			return ret;
		}
	} else {
		mdp_lut_clk = NULL;
	}

#ifdef CONFIG_FB_MSM_MDP40
	/*
	 * mdp_clk should greater than mdp_pclk always
	 */
	if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
		mutex_lock(&mdp_clk_lock);
		clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
		if (mdp_lut_clk != NULL)
			clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
		mutex_unlock(&mdp_clk_lock);
	}
	MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
#endif
	return 0;
}
1265
/*
 * mdp_probe() - probe for both the MDP core device and the panel
 * devices stacked on top of it.
 *
 * The core device (pdev->id == 0, carrying resources) maps the MDP
 * register block, claims IRQ/clocks via mdp_irq_clk_setup(), and
 * initializes the hardware.  Every later call comes from a panel
 * device: a child "msm_fb" platform device is allocated, panel
 * platform data is copied onto it, on/off/dma/cursor hooks are wired
 * up according to mfd->panel.type, and the child is registered.
 * Returns -EPERM for panel probes that arrive before the core device.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size ;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	/* core MDP device: one-time hardware bring-up */
	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size =  resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		if (mdp_irq < 0) {
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif

		/* allow subsequent panel probes to proceed */
		mdp_resource_initialized = 1;
		return 0;
	}

	/* panel device path from here on */
	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain: generic on/off wrappers; panel cases below may
	 * override them with interface-specific handlers */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	/* per-panel-type hookup of dma/cursor/lut/histogram handlers */
	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		/* MDP303: no overlay engine; only DISPLAY_1 is supported */
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		if (mdp_rev >= MDP_REV_40)
			mfd->cursor_update = mdp_hw_cursor_sync_update;
		else
			mfd->cursor_update = mdp_hw_cursor_update;
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		/* core clock floor sized from the DSI pixel clock */
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mfd->lut_update = mdp_lut_update_nonlcdc;
		mfd->do_histogram = mdp_do_histogram;
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		/* core clock floor: panel clock plus 15% headroom */
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
						* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
	case WRITEBACK_PANEL:
		pdata->on = mdp4_overlay_writeback_on;
		pdata->off = mdp4_overlay_writeback_off;
		mfd->dma_fnc = mdp4_writeback_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	/* remember the interface mux so mdp_on() can restore it after
	 * a hardware reset */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	/* one bus-scale client shared by all panels; registered on the
	 * first panel probe that gets here */
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif
	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

 mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}
1610
1611#ifdef CONFIG_PM
/*
 * Common suspend sequence shared by the legacy suspend hook and the
 * early-suspend handler: stop the deferred power-control work, wait
 * for in-flight PPP operations, power the MDP down and mark the
 * driver suspended.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* for a worker that can't be cancelled (already running)... */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	/* mdp_suspended is read elsewhere under the same mutex */
	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
1631#endif
1632
1633#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
1634static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
1635{
1636 if (pdev->id == 0) {
1637 mdp_suspend_sub();
1638 if (mdp_current_clk_on) {
1639 printk(KERN_WARNING"MDP suspend failed\n");
1640 return -EBUSY;
1641 }
1642 }
1643
1644 return 0;
1645}
1646#endif
1647
1648#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * Early-suspend handler: runs the common suspend sequence, blanks the
 * DTV path when configured, and drops the MDP footswitch regulator on
 * hardware newer than rev 4.2.
 */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
	mdp4_dtv_set_black_screen();
#endif
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_disable(footswitch);
}
1658
/*
 * Early-resume handler: re-enables the footswitch dropped in
 * mdp_early_suspend() (same rev > 4.2 condition) and clears the
 * suspended flag under mdp_suspend_mutex.
 */
static void mdp_early_resume(struct early_suspend *h)
{
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_enable(footswitch);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
1668#endif
1669
/*
 * Platform remove: release the footswitch regulator, unmap the MDP
 * register block, disable runtime PM and unregister the bus-scale
 * client acquired in mdp_probe().
 */
static int mdp_remove(struct platform_device *pdev)
{
	if (footswitch != NULL)
		regulator_put(footswitch);
	iounmap(msm_mdp_base);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return 0;
}
1683
/*
 * Register the early-suspend handlers (one level before the
 * framebuffer is disabled) and then the platform driver itself.
 * Returns the platform_driver_register() result.
 */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
1695
/*
 * Module init: run the driver-wide MDP initialization, register the
 * platform driver, and set up debugfs entries when available.
 */
static int __init mdp_driver_init(void)
{
	int ret;

	mdp_drv_init();

	ret = mdp_register_driver();
	if (ret) {
		printk(KERN_ERR "mdp_register_driver() failed!\n");
		return ret;
	}

#if defined(CONFIG_DEBUG_FS)
	mdp_debugfs_init();
#endif

	return 0;

}
1715
/* entry point invoked when the module (or built-in initcall) loads */
module_init(mdp_driver_init);