blob: 989b15418e78750401f5e1266895eda20f5475f2 [file] [log] [blame]
Pavel Machekd480ace2009-09-22 16:47:03 -07001/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
Pavel Machekd480ace2009-09-22 16:47:03 -07006 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/module.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070019#include <linux/kernel.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/sched.h>
21#include <linux/time.h>
22#include <linux/init.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070023#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <linux/spinlock.h>
25#include <linux/hrtimer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070026#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/hardware.h>
28#include <linux/io.h>
29#include <linux/debugfs.h>
30#include <linux/delay.h>
31#include <linux/mutex.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <asm/system.h>
36#include <asm/mach-types.h>
37#include <linux/semaphore.h>
38#include <linux/uaccess.h>
39#include <mach/clk.h>
40#include "mdp.h"
41#include "msm_fb.h"
42#ifdef CONFIG_FB_MSM_MDP40
43#include "mdp4.h"
44#endif
45#include "mipi_dsi.h"
Pavel Machekd480ace2009-09-22 16:47:03 -070046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047uint32 mdp4_extn_disp;
Pavel Machekd480ace2009-09-22 16:47:03 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049static struct clk *mdp_clk;
50static struct clk *mdp_pclk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051static struct clk *mdp_lut_clk;
52int mdp_rev;
Pavel Machekd480ace2009-09-22 16:47:03 -070053
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -070054static struct regulator *footswitch;
Pavel Machekd480ace2009-09-22 16:47:03 -070055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056struct completion mdp_ppp_comp;
57struct semaphore mdp_ppp_mutex;
58struct semaphore mdp_pipe_ctrl_mutex;
Pavel Machekd480ace2009-09-22 16:47:03 -070059
kuogee hsieh9a1fb1c2011-10-30 09:15:12 -070060unsigned long mdp_timer_duration = (HZ/50); /* 20 ms */
Pavel Machekd480ace2009-09-22 16:47:03 -070061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062boolean mdp_ppp_waiting = FALSE;
63uint32 mdp_tv_underflow_cnt;
64uint32 mdp_lcdc_underflow_cnt;
65
66boolean mdp_current_clk_on = FALSE;
67boolean mdp_is_in_isr = FALSE;
68
69/*
70 * legacy mdp_in_processing is only for DMA2-MDDI
71 * this applies to DMA2 block only
72 */
73uint32 mdp_in_processing = FALSE;
74
75#ifdef CONFIG_FB_MSM_MDP40
76uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
77#else
78uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
79#endif
80
81MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
82
83atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];
84
85spinlock_t mdp_spin_lock;
86struct workqueue_struct *mdp_dma_wq; /*mdp dma wq */
87struct workqueue_struct *mdp_vsync_wq; /*mdp vsync wq */
88
89static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
90static struct delayed_work mdp_pipe_ctrl_worker;
91
92static boolean mdp_suspended = FALSE;
93DEFINE_MUTEX(mdp_suspend_mutex);
94
95#ifdef CONFIG_FB_MSM_MDP40
96struct mdp_dma_data dma2_data;
97struct mdp_dma_data dma_s_data;
98struct mdp_dma_data dma_e_data;
99ulong mdp4_display_intf;
100#else
101static struct mdp_dma_data dma2_data;
102static struct mdp_dma_data dma_s_data;
103#ifndef CONFIG_FB_MSM_MDP303
104static struct mdp_dma_data dma_e_data;
105#endif
106#endif
107static struct mdp_dma_data dma3_data;
108
109extern ktime_t mdp_dma2_last_update_time;
110
111extern uint32 mdp_dma2_update_time_in_usec;
112extern int mdp_lcd_rd_cnt_offset_slow;
113extern int mdp_lcd_rd_cnt_offset_fast;
114extern int mdp_usec_diff_threshold;
115
116#ifdef CONFIG_FB_MSM_LCDC
117extern int first_pixel_start_x;
118extern int first_pixel_start_y;
119#endif
120
121#ifdef MSM_FB_ENABLE_DBGFS
122struct dentry *mdp_dir;
123#endif
124
125#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
126static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
127#else
128#define mdp_suspend NULL
129#endif
130
131struct timeval mdp_dma2_timeval;
132struct timeval mdp_ppp_timeval;
133
134#ifdef CONFIG_HAS_EARLYSUSPEND
135static struct early_suspend early_suspend;
136#endif
137
138static u32 mdp_irq;
139
140static uint32 mdp_prim_panel_type = NO_PANEL;
141#ifndef CONFIG_FB_MSM_MDP22
142DEFINE_MUTEX(mdp_lut_push_sem);
143static int mdp_lut_i;
144static int mdp_lut_hw_update(struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700145{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 int i;
147 u16 *c[3];
148 u16 r, g, b;
Pavel Machekd480ace2009-09-22 16:47:03 -0700149
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700150 c[0] = cmap->green;
151 c[1] = cmap->blue;
152 c[2] = cmap->red;
Pavel Machekd480ace2009-09-22 16:47:03 -0700153
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700154 for (i = 0; i < cmap->len; i++) {
155 if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
156 copy_from_user(&g, cmap->green++, sizeof(g)) ||
157 copy_from_user(&b, cmap->blue++, sizeof(b)))
158 return -EFAULT;
159
160#ifdef CONFIG_FB_MSM_MDP40
161 MDP_OUTP(MDP_BASE + 0x94800 +
162#else
163 MDP_OUTP(MDP_BASE + 0x93800 +
164#endif
165 (0x400*mdp_lut_i) + cmap->start*4 + i*4,
166 ((g & 0xff) |
167 ((b & 0xff) << 8) |
168 ((r & 0xff) << 16)));
Pavel Machekd480ace2009-09-22 16:47:03 -0700169 }
170
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700172}
173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174static int mdp_lut_push;
175static int mdp_lut_push_i;
176static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 int ret;
179
180 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
181 ret = mdp_lut_hw_update(cmap);
182 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
183
184 if (ret)
185 return ret;
186
187 mutex_lock(&mdp_lut_push_sem);
188 mdp_lut_push = 1;
189 mdp_lut_push_i = mdp_lut_i;
190 mutex_unlock(&mdp_lut_push_sem);
191
192 mdp_lut_i = (mdp_lut_i + 1)%2;
193
194 return 0;
195}
196
197static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
198{
199 int ret;
200
201 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
202 ret = mdp_lut_hw_update(cmap);
203
204 if (ret) {
205 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
206 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -0700207 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208
209 MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
210 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
211 mdp_lut_i = (mdp_lut_i + 1)%2;
212
213 return 0;
214}
215
216static void mdp_lut_enable(void)
217{
218 if (mdp_lut_push) {
219 mutex_lock(&mdp_lut_push_sem);
220 mdp_lut_push = 0;
221 MDP_OUTP(MDP_BASE + 0x90070,
222 (mdp_lut_push_i << 10) | 0x17);
223 mutex_unlock(&mdp_lut_push_sem);
224 }
225}
226
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700227#define MDP_REV42_HIST_MAX_BIN 128
228#define MDP_REV41_HIST_MAX_BIN 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229
230#ifdef CONFIG_FB_MSM_MDP40
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700231unsigned int mdp_hist_frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232struct completion mdp_hist_comp;
233boolean mdp_is_hist_start = FALSE;
234#else
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700235static unsigned int mdp_hist_frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236static struct completion mdp_hist_comp;
237static boolean mdp_is_hist_start = FALSE;
238#endif
239static DEFINE_MUTEX(mdp_hist_mutex);
240
/*
 * mdp_histogram_ctrl() - enable or disable the MDP histogram engine.
 * @en: TRUE to start collection, FALSE to stop it.
 *
 * The histogram register block sits at 0x95000 on MDP rev >= 4.0 and at
 * 0x94000 on older cores.  On enable, collection is armed for one frame
 * and the HIST_DONE interrupt is unmasked (rev >= 4.0 only); on disable,
 * HIST_DONE is acked and masked before the IRQ vote is dropped.
 *
 * Always returns 0.
 */
int mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	unsigned long hist_base;
	uint32_t status;

	if (mdp_rev >= MDP_REV_40)
		hist_base = 0x95000;
	else
		hist_base = 0x94000;

	if (en == TRUE) {
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_hist_frame_cnt = 1;
		mdp_enable_irq(MDP_HISTOGRAM_TERM);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		/* First start on MDP4+: presumably reset block (+0x10) and
		 * unmask HIST_DONE (+0x1c) — register names not visible here. */
		if (mdp_is_hist_start == FALSE && mdp_rev >= MDP_REV_40) {
			MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
			MDP_OUTP(MDP_BASE + hist_base + 0x1c, INTR_HIST_DONE);
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* Program the frame count and kick off collection. */
		MDP_OUTP(MDP_BASE + hist_base + 0x4, mdp_hist_frame_cnt);
		MDP_OUTP(MDP_BASE + hist_base, 1);
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	} else {
		if (mdp_rev >= MDP_REV_40) {
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			/* Mask HIST_DONE (+0x1C) and ack any pending
			 * instance (+0x18) before releasing the IRQ. */
			status = inpdw(MDP_BASE + hist_base + 0x1C);
			status &= ~INTR_HIST_DONE;
			MDP_OUTP(MDP_BASE + hist_base + 0x1C, status);

			MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE);
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
				FALSE);
		}

		mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}

	return 0;
}
282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283int mdp_start_histogram(struct fb_info *info)
Pavel Machekd480ace2009-09-22 16:47:03 -0700284{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285 unsigned long flag;
Pavel Machekd480ace2009-09-22 16:47:03 -0700286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287 int ret = 0;
288 mutex_lock(&mdp_hist_mutex);
289 if (mdp_is_hist_start == TRUE) {
290 printk(KERN_ERR "%s histogram already started\n", __func__);
291 ret = -EPERM;
292 goto mdp_hist_start_err;
293 }
294
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700295 ret = mdp_histogram_ctrl(TRUE);
296
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700297 spin_lock_irqsave(&mdp_spin_lock, flag);
298 mdp_is_hist_start = TRUE;
299 spin_unlock_irqrestore(&mdp_spin_lock, flag);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300
301mdp_hist_start_err:
302 mutex_unlock(&mdp_hist_mutex);
303 return ret;
304
305}
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700306
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700307int mdp_stop_histogram(struct fb_info *info)
308{
309 unsigned long flag;
310 int ret = 0;
311 mutex_lock(&mdp_hist_mutex);
312 if (!mdp_is_hist_start) {
313 printk(KERN_ERR "%s histogram already stopped\n", __func__);
314 ret = -EPERM;
315 goto mdp_hist_stop_err;
316 }
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318 spin_lock_irqsave(&mdp_spin_lock, flag);
319 mdp_is_hist_start = FALSE;
320 spin_unlock_irqrestore(&mdp_spin_lock, flag);
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700321
322 ret = mdp_histogram_ctrl(FALSE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700323
324mdp_hist_stop_err:
325 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700326 return ret;
327}
328
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700329static int mdp_copy_hist_data(struct mdp_histogram *hist)
Pavel Machekd480ace2009-09-22 16:47:03 -0700330{
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700331 char *mdp_hist_base;
332 uint32 r_data_offset = 0x100, g_data_offset = 0x200;
333 uint32 b_data_offset = 0x300;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334 int ret = 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700335
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700336 mutex_lock(&mdp_hist_mutex);
337 if (mdp_rev >= MDP_REV_42) {
338 mdp_hist_base = MDP_BASE + 0x95000;
339 r_data_offset = 0x400;
340 g_data_offset = 0x800;
341 b_data_offset = 0xc00;
342 } else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_41) {
343 mdp_hist_base = MDP_BASE + 0x95000;
344 } else if (mdp_rev >= MDP_REV_30 && mdp_rev <= MDP_REV_31) {
345 mdp_hist_base = MDP_BASE + 0x94000;
346 } else {
347 pr_err("%s(): Unsupported MDP rev %u\n", __func__, mdp_rev);
348 return -EPERM;
349 }
350
351 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
352 if (hist->r) {
353 ret = copy_to_user(hist->r, mdp_hist_base + r_data_offset,
354 hist->bin_cnt * 4);
355 if (ret)
356 goto hist_err;
357 }
358 if (hist->g) {
359 ret = copy_to_user(hist->g, mdp_hist_base + g_data_offset,
360 hist->bin_cnt * 4);
361 if (ret)
362 goto hist_err;
363 }
364 if (hist->b) {
365 ret = copy_to_user(hist->b, mdp_hist_base + b_data_offset,
366 hist->bin_cnt * 4);
367 if (ret)
368 goto hist_err;
369 }
370
371 if (mdp_is_hist_start == TRUE) {
372 MDP_OUTP(mdp_hist_base + 0x004,
373 mdp_hist_frame_cnt);
374 MDP_OUTP(mdp_hist_base, 1);
375 }
376 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
377 mutex_unlock(&mdp_hist_mutex);
378 return 0;
379
380hist_err:
381 printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
382 return ret;
383}
384
/*
 * mdp_do_histogram() - validate a histogram request, wait for the next
 * HIST_DONE completion from the ISR, then copy the bins to userspace.
 *
 * Rejects a zero frame_cnt/bin_cnt, and bin counts beyond the per-rev
 * hardware maximum (32 bins through rev 4.1, 128 on rev 4.2).
 *
 * NOTE(review): mdp_hist_mutex is dropped before the wait, so
 * mdp_is_hist_start may flip between the check and the
 * wait_for_completion_killable() — a stop racing in here could leave
 * this caller waiting for a completion that never fires; confirm
 * against the callers before relying on this path.
 */
static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
		|| (mdp_rev == MDP_REV_42 &&
		hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram not started\n", __func__);
		mutex_unlock(&mdp_hist_mutex);
		return -EPERM;
	}
	mutex_unlock(&mdp_hist_mutex);

	/* Arm for the requested frame count and block until the ISR
	 * signals HIST_DONE (killable, so a fatal signal unblocks us). */
	INIT_COMPLETION(mdp_hist_comp);
	mdp_hist_frame_cnt = hist->frame_cnt;
	wait_for_completion_killable(&mdp_hist_comp);

	return mdp_copy_hist_data(hist);
}
409#endif
410
411/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
412
413int mdp_ppp_pipe_wait(void)
414{
415 int ret = 1;
416
417 /* wait 5 seconds for the operation to complete before declaring
418 the MDP hung */
419
420 if (mdp_ppp_waiting == TRUE) {
421 ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
422 5 * HZ);
423
424 if (!ret)
425 printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
426 __func__);
Pavel Machekd480ace2009-09-22 16:47:03 -0700427 }
428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429 return ret;
430}
Pavel Machekd480ace2009-09-22 16:47:03 -0700431
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432static DEFINE_SPINLOCK(mdp_lock);
433static int mdp_irq_mask;
434static int mdp_irq_enabled;
Pavel Machekd480ace2009-09-22 16:47:03 -0700435
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700436/*
437 * mdp_enable_irq: can not be called from isr
438 */
439void mdp_enable_irq(uint32 term)
440{
441 unsigned long irq_flags;
442
443 spin_lock_irqsave(&mdp_lock, irq_flags);
444 if (mdp_irq_mask & term) {
445 printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
446 __func__, term, mdp_irq_mask, mdp_irq_enabled);
447 } else {
448 mdp_irq_mask |= term;
449 if (mdp_irq_mask && !mdp_irq_enabled) {
450 mdp_irq_enabled = 1;
451 enable_irq(mdp_irq);
452 }
453 }
Pavel Machekd480ace2009-09-22 16:47:03 -0700454 spin_unlock_irqrestore(&mdp_lock, irq_flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455}
456
457/*
458 * mdp_disable_irq: can not be called from isr
459 */
460void mdp_disable_irq(uint32 term)
461{
462 unsigned long irq_flags;
463
464 spin_lock_irqsave(&mdp_lock, irq_flags);
465 if (!(mdp_irq_mask & term)) {
466 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
467 __func__, term, mdp_irq_mask, mdp_irq_enabled);
468 } else {
469 mdp_irq_mask &= ~term;
470 if (!mdp_irq_mask && mdp_irq_enabled) {
471 mdp_irq_enabled = 0;
472 disable_irq(mdp_irq);
473 }
474 }
475 spin_unlock_irqrestore(&mdp_lock, irq_flags);
476}
477
478void mdp_disable_irq_nosync(uint32 term)
479{
480 spin_lock(&mdp_lock);
481 if (!(mdp_irq_mask & term)) {
482 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
483 __func__, term, mdp_irq_mask, mdp_irq_enabled);
484 } else {
485 mdp_irq_mask &= ~term;
486 if (!mdp_irq_mask && mdp_irq_enabled) {
487 mdp_irq_enabled = 0;
488 disable_irq_nosync(mdp_irq);
489 }
490 }
491 spin_unlock(&mdp_lock);
492}
493
/*
 * mdp_pipe_kickoff() - start a hardware operation on the engine selected
 * by @term.
 * @term: engine to kick (MDP_PPP_TERM, MDP_DMA2_TERM, MDP_DMA_S_TERM,
 *        MDP_DMA_E_TERM, and on MDP4 the overlay terms).
 * @mfd:  framebuffer device context (not referenced in this body).
 *
 * PPP kickoff is synchronous: it votes the block on, arms the
 * completion, writes the start register and blocks (killably) until the
 * ISR signals MDP_PPP_DONE.  The DMA/overlay kickoffs only write the
 * per-engine start register — completion is handled elsewhere.  The
 * start-register offsets differ per MDP generation, hence the #ifdef
 * ladder.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* Flush any pending LUT bank switch before the frame. */
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		/* MDP303 command-mode panels need a DSI trigger too. */
		mipi_dsi_cmd_mdp_start();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
581static int mdp_clk_rate;
582static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
583static int pdev_list_cnt;
584
/*
 * Delayed-work handler: drops the MDP_MASTER_BLOCK power vote, which
 * lets mdp_pipe_ctrl() gate the MDP clocks once all blocks are idle.
 * Scheduled from mdp_pipe_ctrl() after mdp_timer_duration of idleness.
 */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
 * mdp_pipe_ctrl() - per-block power voting and global MDP clock gating.
 * @block: which engine is voting (PPP, DMA2, CMD, MASTER, ...).
 * @state: MDP_BLOCK_POWER_ON to take a vote, MDP_BLOCK_POWER_OFF to drop.
 * @isr:   TRUE when called from interrupt context; only POWER_OFF is
 *         allowed there (see WARN_ON below).
 *
 * Each block keeps a refcount in mdp_block_power_cnt[].  When the last
 * count drops to zero the MDP core clock, pclk, LUT clock and panel
 * clocks are disabled (deferred via workqueue unless this is the master
 * block or the driver is suspending); when the first count appears they
 * are re-enabled.  From ISR context the actual clock work is always
 * deferred to mdp_pipe_ctrl_wq.
 */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			* Master has to serve a request to power off MDP always
			* It also has a timer to power off.  So, in case of
			* timer expires first and DMA2 finishes later,
			* master has to power off two times
			* There shouldn't be multiple power-off request for
			* other blocks
			*/
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			/* Clamp: never let a refcount go negative. */
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run.  queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					/* Remember the rate so the ON path
					 * can restore it. */
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					/* Early MDP4 parts: park the clock at
					 * a lower rate while gated. */
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				/* Restore the rate saved on the OFF path. */
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
750
751#ifndef CONFIG_FB_MSM_MDP40
752irqreturn_t mdp_isr(int irq, void *ptr)
753{
754 uint32 mdp_interrupt = 0;
755 struct mdp_dma_data *dma;
756
757 mdp_is_in_isr = TRUE;
758 do {
759 mdp_interrupt = inp32(MDP_INTR_STATUS);
760 outp32(MDP_INTR_CLEAR, mdp_interrupt);
761
762 mdp_interrupt &= mdp_intr_mask;
763
764 if (mdp_interrupt & TV_ENC_UNDERRUN) {
765 mdp_interrupt &= ~(TV_ENC_UNDERRUN);
766 mdp_tv_underflow_cnt++;
767 }
768
769 if (!mdp_interrupt)
770 break;
771
772 /* DMA3 TV-Out Start */
773 if (mdp_interrupt & TV_OUT_DMA3_START) {
774 /* let's disable TV out interrupt */
775 mdp_intr_mask &= ~TV_OUT_DMA3_START;
776 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
777
778 dma = &dma3_data;
779 if (dma->waiting) {
780 dma->waiting = FALSE;
781 complete(&dma->comp);
782 }
783 }
784#ifndef CONFIG_FB_MSM_MDP22
785 if (mdp_interrupt & MDP_HIST_DONE) {
786 outp32(MDP_BASE + 0x94018, 0x3);
787 outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700788 complete(&mdp_hist_comp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700789 }
790
791 /* LCDC UnderFlow */
792 if (mdp_interrupt & LCDC_UNDERFLOW) {
793 mdp_lcdc_underflow_cnt++;
794 /*when underflow happens HW resets all the histogram
795 registers that were set before so restore them back
796 to normal.*/
797 MDP_OUTP(MDP_BASE + 0x94010, 1);
798 MDP_OUTP(MDP_BASE + 0x9401c, 2);
799 if (mdp_is_hist_start == TRUE) {
800 MDP_OUTP(MDP_BASE + 0x94004,
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700801 mdp_hist_frame_cnt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700802 MDP_OUTP(MDP_BASE + 0x94000, 1);
803 }
804 }
805 /* LCDC Frame Start */
806 if (mdp_interrupt & LCDC_FRAME_START) {
807 /* let's disable LCDC interrupt */
808 mdp_intr_mask &= ~LCDC_FRAME_START;
809 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
810
811 dma = &dma2_data;
812 if (dma->waiting) {
813 dma->waiting = FALSE;
814 complete(&dma->comp);
815 }
816 }
817
818 /* DMA2 LCD-Out Complete */
819 if (mdp_interrupt & MDP_DMA_S_DONE) {
820 dma = &dma_s_data;
821 dma->busy = FALSE;
822 mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
823 TRUE);
824 complete(&dma->comp);
825 }
826 /* DMA_E LCD-Out Complete */
827 if (mdp_interrupt & MDP_DMA_E_DONE) {
828 dma = &dma_s_data;
829 dma->busy = FALSE;
830 mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
831 TRUE);
832 complete(&dma->comp);
833 }
834
835#endif
836
837 /* DMA2 LCD-Out Complete */
838 if (mdp_interrupt & MDP_DMA_P_DONE) {
839 struct timeval now;
840
841 mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
842 mdp_dma2_last_update_time);
843 if (mdp_debug[MDP_DMA2_BLOCK]) {
844 jiffies_to_timeval(jiffies, &now);
845 mdp_dma2_timeval.tv_usec =
846 now.tv_usec - mdp_dma2_timeval.tv_usec;
847 }
848#ifndef CONFIG_FB_MSM_MDP303
849 dma = &dma2_data;
850 dma->busy = FALSE;
851 mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
852 TRUE);
853 complete(&dma->comp);
854#else
855 if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
856 dma = &dma2_data;
857 dma->busy = FALSE;
858 mdp_pipe_ctrl(MDP_DMA2_BLOCK,
859 MDP_BLOCK_POWER_OFF, TRUE);
860 complete(&dma->comp);
861 }
862#endif
863 }
864 /* PPP Complete */
865 if (mdp_interrupt & MDP_PPP_DONE) {
866#ifdef CONFIG_FB_MSM_MDP31
867 MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
868#endif
869 mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
870 if (mdp_ppp_waiting) {
871 mdp_ppp_waiting = FALSE;
872 complete(&mdp_ppp_comp);
873 }
874 }
875 } while (1);
876
877 mdp_is_in_isr = FALSE;
878
Pavel Machekd480ace2009-09-22 16:47:03 -0700879 return IRQ_HANDLED;
880}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700882
/*
 * mdp_drv_init() - one-time initialization of driver-global state.
 *
 * Clears the per-block debug flags, creates the DMA/vsync/pipe-ctrl
 * workqueues, initializes every completion, semaphore and mutex used by
 * the DMA channels, zeroes the power-vote counters, and (when
 * MSM_FB_ENABLE_DBGFS is set) publishes tuning knobs under the msm_fb
 * debugfs root.  Runs before any device probe uses these objects.
 */
static void mdp_drv_init(void)
{
	int i;

	/* No block-level debug tracing by default. */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	sema_init(&mdp_ppp_mutex, 1);
	sema_init(&mdp_pipe_ctrl_mutex, 1);

	/* Per-channel DMA state: idle, nobody waiting. */
	dma2_data.busy = FALSE;
	dma2_data.dmap_busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_completion(&dma2_data.dmap_comp);
	sema_init(&dma2_data.mutex, 1);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	sema_init(&dma3_data.mutex, 1);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	sema_init(&dma_s_data.mutex, 1);

#ifndef CONFIG_FB_MSM_MDP303
	/* dma_e_data exists only on non-MDP303 builds. */
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	mutex_init(&dma_e_data.ov_mutex);
#endif

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		atomic_set(&mdp_block_power_cnt[i], 0);
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
#endif
			}
		}
	}
#endif
}
976
977static int mdp_probe(struct platform_device *pdev);
978static int mdp_remove(struct platform_device *pdev);
979
/* Runtime-PM suspend callback: no hardware state to save here, only trace. */
static int mdp_runtime_suspend(struct device *device)
{
	dev_dbg(device, "pm_runtime: suspending...\n");
	return 0;
}
985
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986static int mdp_runtime_resume(struct device *dev)
Pavel Machekd480ace2009-09-22 16:47:03 -0700987{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988 dev_dbg(dev, "pm_runtime: resuming...\n");
Pavel Machekd480ace2009-09-22 16:47:03 -0700989 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700990}
991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700992static struct dev_pm_ops mdp_dev_pm_ops = {
993 .runtime_suspend = mdp_runtime_suspend,
994 .runtime_resume = mdp_runtime_resume,
Pavel Machekd480ace2009-09-22 16:47:03 -0700995};
996
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700997
998static struct platform_driver mdp_driver = {
999 .probe = mdp_probe,
1000 .remove = mdp_remove,
1001#ifndef CONFIG_HAS_EARLYSUSPEND
1002 .suspend = mdp_suspend,
1003 .resume = NULL,
1004#endif
1005 .shutdown = NULL,
1006 .driver = {
1007 /*
1008 * Driver name must match the device name added in
1009 * platform.c.
1010 */
1011 .name = "mdp",
1012 .pm = &mdp_dev_pm_ops,
1013 },
1014};
1015
1016static int mdp_off(struct platform_device *pdev)
Pavel Machekd480ace2009-09-22 16:47:03 -07001017{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001018 int ret = 0;
1019 mdp_histogram_ctrl(FALSE);
1020
1021 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1022 ret = panel_next_off(pdev);
1023 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1024
1025 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -07001026}
1027
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001028static int mdp_on(struct platform_device *pdev)
1029{
1030 int ret = 0;
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001031
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001032#ifdef CONFIG_FB_MSM_MDP40
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001033 struct msm_fb_data_type *mfd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001034 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1035 if (is_mdp4_hw_reset()) {
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001036 mfd = platform_get_drvdata(pdev);
1037 mdp_vsync_cfg_regs(mfd, FALSE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001038 mdp4_hw_init();
1039 outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
1040 }
1041 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1042#endif
1043 mdp_histogram_ctrl(TRUE);
1044
1045 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1046 ret = panel_next_on(pdev);
1047 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1048 return ret;
1049}
1050
/* Set to 1 by the first probe (the resource-carrying pdev); subsequent
 * panel probes bail out with -EPERM until then. */
static int mdp_resource_initialized;
/* Board-supplied MDP platform data; may be NULL. */
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;

/*
 * mdp_hw_revision:
 * 0 == V1
 * 1 == V2
 * 2 == V2.1
 *
 */
1063void mdp_hw_version(void)
1064{
1065 char *cp;
1066 uint32 *hp;
1067
1068 if (mdp_pdata == NULL)
1069 return;
1070
1071 mdp_hw_revision = MDP4_REVISION_NONE;
1072 if (mdp_pdata->hw_revision_addr == 0)
1073 return;
1074
1075 /* tlmmgpio2 shadow */
1076 cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);
1077
1078 if (cp == NULL)
1079 return;
1080
1081 hp = (uint32 *)cp; /* HW_REVISION_NUMBER */
1082 mdp_hw_revision = *hp;
1083 iounmap(cp);
1084
1085 mdp_hw_revision >>= 28; /* bit 31:28 */
1086 mdp_hw_revision &= 0x0f;
1087
1088 MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
1089 __func__, mdp_hw_revision);
1090}
1091
kuogee hsieh5a7f32c2011-08-31 17:51:34 -07001092int mdp4_writeback_offset(void)
1093{
1094 int off = 0;
1095
1096 if (mdp_pdata->writeback_offset)
1097 off = mdp_pdata->writeback_offset();
1098
1099 pr_debug("%s: writeback_offset=%d %x\n", __func__, off, off);
1100
1101 return off;
1102}
1103
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104#ifdef CONFIG_FB_MSM_MDP40
/*
 * Raise the MDP core clock to at least min_clk_rate and then bump every
 * entry of the board's mdp_core_clk_table that is below the rate actually
 * achieved, so later perf-level lookups never select a slower clock than
 * the panel requires.
 */
static void configure_mdp_core_clk_table(uint32 min_clk_rate)
{
	uint8 count;
	uint32 current_rate;
	if (mdp_clk && mdp_pdata && mdp_pdata->mdp_core_clk_table) {
		/* snap the request to a rate the clock can actually run at */
		min_clk_rate = clk_round_rate(mdp_clk, min_clk_rate);
		if (clk_set_rate(mdp_clk, min_clk_rate) < 0)
			printk(KERN_ERR "%s: clk_set_rate failed\n",
							__func__);
		else {
			count = 0;
			current_rate = clk_get_rate(mdp_clk);
			while (count < mdp_pdata->num_mdp_clk) {
				if (mdp_pdata->mdp_core_clk_table[count]
						< current_rate) {
					mdp_pdata->
					mdp_core_clk_table[count] =
							current_rate;
				}
				count++;
			}
		}
	}
}
1129#endif
1130
1131#ifdef CONFIG_MSM_BUS_SCALING
1132static uint32_t mdp_bus_scale_handle;
1133int mdp_bus_scale_update_request(uint32_t index)
1134{
1135 if (!mdp_pdata && (!mdp_pdata->mdp_bus_scale_table
1136 || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1))) {
1137 printk(KERN_ERR "%s invalid table or index\n", __func__);
1138 return -EINVAL;
1139 }
1140 if (mdp_bus_scale_handle < 1) {
1141 printk(KERN_ERR "%s invalid bus handle\n", __func__);
1142 return -EINVAL;
1143 }
1144 return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
1145 index);
1146}
1147#endif
1148DEFINE_MUTEX(mdp_clk_lock);
/*
 * Set the MDP core clock to the table rate for `perf_level` (1 = fastest:
 * the table is indexed as num_mdp_clk - perf_level).  Returns 0 on success,
 * -EINVAL when clock/platform data is missing or the level is out of range,
 * or the clk_set_rate() error code.
 */
int mdp_set_core_clk(uint16 perf_level)
{
	int ret = -EINVAL;
	if (mdp_clk && mdp_pdata
		 && mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk)
			printk(KERN_ERR "%s invalid perf level\n", __func__);
		else {
			/* mdp_clk_lock serializes rate changes with readers */
			mutex_lock(&mdp_clk_lock);
			ret = clk_set_rate(mdp_clk,
				mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
						- perf_level]);
			mutex_unlock(&mdp_clk_lock);
			if (ret) {
				printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
					__func__);
			}
		}
	}
	return ret;
}
1171
1172unsigned long mdp_get_core_clk(void)
1173{
1174 unsigned long clk_rate = 0;
1175 if (mdp_clk) {
1176 mutex_lock(&mdp_clk_lock);
1177 clk_rate = clk_get_rate(mdp_clk);
1178 mutex_unlock(&mdp_clk_lock);
1179 }
1180
1181 return clk_rate;
1182}
1183
1184unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
1185{
1186 unsigned long clk_rate = 0;
1187
1188 if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
1189 if (perf_level > mdp_pdata->num_mdp_clk) {
1190 printk(KERN_ERR "%s invalid perf level\n", __func__);
1191 clk_rate = mdp_get_core_clk();
1192 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001193 clk_rate = mdp_pdata->
1194 mdp_core_clk_table[mdp_pdata->num_mdp_clk
1195 - perf_level];
1196 }
1197 } else
1198 clk_rate = mdp_get_core_clk();
1199
1200 return clk_rate;
1201}
1202
1203static int mdp_irq_clk_setup(void)
1204{
1205 int ret;
1206
1207#ifdef CONFIG_FB_MSM_MDP40
1208 ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
1209#else
1210 ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
1211#endif
1212 if (ret) {
1213 printk(KERN_ERR "mdp request_irq() failed!\n");
1214 return ret;
1215 }
1216 disable_irq(mdp_irq);
1217
1218 footswitch = regulator_get(NULL, "fs_mdp");
1219 if (IS_ERR(footswitch))
1220 footswitch = NULL;
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001221 else
1222 regulator_enable(footswitch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001223
1224 mdp_clk = clk_get(NULL, "mdp_clk");
1225 if (IS_ERR(mdp_clk)) {
1226 ret = PTR_ERR(mdp_clk);
1227 printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
1228 free_irq(mdp_irq, 0);
1229 return ret;
1230 }
1231
1232 mdp_pclk = clk_get(NULL, "mdp_pclk");
1233 if (IS_ERR(mdp_pclk))
1234 mdp_pclk = NULL;
1235
1236 if (mdp_rev == MDP_REV_42) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001237 mdp_lut_clk = clk_get(NULL, "lut_mdp");
1238 if (IS_ERR(mdp_lut_clk)) {
1239 ret = PTR_ERR(mdp_lut_clk);
1240 pr_err("can't get mdp_clk error:%d!\n", ret);
1241 clk_put(mdp_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001242 free_irq(mdp_irq, 0);
1243 return ret;
1244 }
1245 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001246 mdp_lut_clk = NULL;
1247 }
1248
1249#ifdef CONFIG_FB_MSM_MDP40
1250 /*
1251 * mdp_clk should greater than mdp_pclk always
1252 */
1253 if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
1254 mutex_lock(&mdp_clk_lock);
1255 clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
1256 if (mdp_lut_clk != NULL)
1257 clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
1258 mutex_unlock(&mdp_clk_lock);
1259 }
1260 MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
1261#endif
1262 return 0;
1263}
1264
/*
 * Probe entry for both the MDP core device and the per-panel devices.
 *
 * Called twice (or more):
 *  1. For pdev->id == 0 with MMIO resources: map registers, grab IRQ and
 *     clocks, init the MDP core, and mark resources initialized.
 *  2. For each panel device: allocate a child "msm_fb" platform device,
 *     chain the panel's on/off hooks through mdp_on/mdp_off, and pick the
 *     DMA update function / DMA channel / interface mux per panel type.
 *
 * Returns 0 on success or a negative errno; on error the allocated
 * msm_fb device is released via the mdp_probe_err path.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size ;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	/* first-stage probe: the resource-carrying MDP core device */
	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size =  resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		if (mdp_irq < 0) {
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif

		mdp_resource_initialized = 1;
		return 0;
	}

	/* second-stage probes: panel devices must wait for stage one */
	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;
	mfd->mdp_rev = mdp_rev;

	mfd->ov0_blt_state  = 0;
	mfd->use_ov0_blt = 0 ;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain: panel on/off now routes through mdp_on/mdp_off */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	/* per-panel-type DMA function, DMA channel, and interface mux */
	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		if (mdp_rev >= MDP_REV_40)
			mfd->cursor_update = mdp_hw_cursor_sync_update;
		else
			mfd->cursor_update = mdp_hw_cursor_update;
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		/* core clk must exceed the DSI pixel clock by 3/2 */
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mfd->lut_update = mdp_lut_update_nonlcdc;
		mfd->do_histogram = mdp_do_histogram;
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		/* core clk must exceed the panel clock by 23/20 */
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
								* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
	case WRITEBACK_PANEL:
		pdata->on = mdp4_overlay_writeback_on;
		pdata->off = mdp4_overlay_writeback_off;
		mfd->dma_fnc = mdp4_writeback_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	/* remember the interface mux for restore after a hardware reset */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif
	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

 mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}
1613
1614#ifdef CONFIG_PM
/*
 * Common suspend path (shared by platform suspend and early suspend):
 * stop deferred clock-gating work, wait for in-flight PPP operations,
 * force MDP power off, and mark the driver suspended.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* flush any worker already running that could not be cancelled */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
1634#endif
1635
1636#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
1637static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
1638{
1639 if (pdev->id == 0) {
1640 mdp_suspend_sub();
1641 if (mdp_current_clk_on) {
1642 printk(KERN_WARNING"MDP suspend failed\n");
1643 return -EBUSY;
1644 }
1645 }
1646
1647 return 0;
1648}
1649#endif
1650
1651#ifdef CONFIG_HAS_EARLYSUSPEND
/* Early-suspend hook: run the common suspend path, blank the external DTV
 * interface, and drop the footswitch on parts newer than MDP 4.2. */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
	mdp4_dtv_set_black_screen();
#endif
	/* footswitch is only toggled here for revisions above MDP_REV_42 */
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_disable(footswitch);
}
1661
/* Early-resume hook: re-enable the footswitch (where mdp_early_suspend
 * dropped it) and clear the suspended flag. */
static void mdp_early_resume(struct early_suspend *h)
{
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_enable(footswitch);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
1671#endif
1672
/*
 * Driver removal: release the footswitch regulator, unmap the MDP register
 * window, disable runtime PM, and unregister the bus-scaling client.
 * Always returns 0.
 */
static int mdp_remove(struct platform_device *pdev)
{
	if (footswitch != NULL)
		regulator_put(footswitch);
	iounmap(msm_mdp_base);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return 0;
}
1686
/*
 * Register the MDP platform driver; when early suspend is configured, hook
 * the early suspend/resume callbacks first (one level before the FB is
 * disabled, so MDP quiesces ahead of the framebuffer).
 */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
1698
1699static int __init mdp_driver_init(void)
1700{
1701 int ret;
1702
1703 mdp_drv_init();
1704
1705 ret = mdp_register_driver();
1706 if (ret) {
1707 printk(KERN_ERR "mdp_register_driver() failed!\n");
1708 return ret;
1709 }
1710
1711#if defined(CONFIG_DEBUG_FS)
1712 mdp_debugfs_init();
1713#endif
1714
1715 return 0;
1716
1717}
1718
1719module_init(mdp_driver_init);