blob: 462ede1aa496f65122a4fda54d3e05afa424d3bf [file] [log] [blame]
Pavel Machekd480ace2009-09-22 16:47:03 -07001/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
Pavel Machekd480ace2009-09-22 16:47:03 -07006 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/module.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070019#include <linux/kernel.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/sched.h>
21#include <linux/time.h>
22#include <linux/init.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070023#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <linux/spinlock.h>
25#include <linux/hrtimer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070026#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027#include <mach/hardware.h>
28#include <linux/io.h>
29#include <linux/debugfs.h>
30#include <linux/delay.h>
31#include <linux/mutex.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
Pavel Machekd480ace2009-09-22 16:47:03 -070034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <asm/system.h>
36#include <asm/mach-types.h>
37#include <linux/semaphore.h>
38#include <linux/uaccess.h>
39#include <mach/clk.h>
40#include "mdp.h"
41#include "msm_fb.h"
42#ifdef CONFIG_FB_MSM_MDP40
43#include "mdp4.h"
44#endif
45#include "mipi_dsi.h"
Pavel Machekd480ace2009-09-22 16:47:03 -070046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047uint32 mdp4_extn_disp;
Pavel Machekd480ace2009-09-22 16:47:03 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049static struct clk *mdp_clk;
50static struct clk *mdp_pclk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051static struct clk *mdp_lut_clk;
52int mdp_rev;
Pavel Machekd480ace2009-09-22 16:47:03 -070053
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -070054static struct regulator *footswitch;
Pavel Machekd480ace2009-09-22 16:47:03 -070055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056struct completion mdp_ppp_comp;
57struct semaphore mdp_ppp_mutex;
58struct semaphore mdp_pipe_ctrl_mutex;
Pavel Machekd480ace2009-09-22 16:47:03 -070059
kuogee hsieh562c58f2011-12-08 08:47:33 -080060unsigned long mdp_timer_duration = (HZ/20); /* 50 msecond */
Pavel Machekd480ace2009-09-22 16:47:03 -070061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062boolean mdp_ppp_waiting = FALSE;
63uint32 mdp_tv_underflow_cnt;
64uint32 mdp_lcdc_underflow_cnt;
65
66boolean mdp_current_clk_on = FALSE;
67boolean mdp_is_in_isr = FALSE;
68
69/*
70 * legacy mdp_in_processing is only for DMA2-MDDI
71 * this applies to DMA2 block only
72 */
73uint32 mdp_in_processing = FALSE;
74
75#ifdef CONFIG_FB_MSM_MDP40
76uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
77#else
78uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
79#endif
80
81MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
82
83atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];
84
85spinlock_t mdp_spin_lock;
86struct workqueue_struct *mdp_dma_wq; /*mdp dma wq */
87struct workqueue_struct *mdp_vsync_wq; /*mdp vsync wq */
88
89static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
90static struct delayed_work mdp_pipe_ctrl_worker;
91
92static boolean mdp_suspended = FALSE;
93DEFINE_MUTEX(mdp_suspend_mutex);
94
95#ifdef CONFIG_FB_MSM_MDP40
96struct mdp_dma_data dma2_data;
97struct mdp_dma_data dma_s_data;
98struct mdp_dma_data dma_e_data;
99ulong mdp4_display_intf;
100#else
101static struct mdp_dma_data dma2_data;
102static struct mdp_dma_data dma_s_data;
103#ifndef CONFIG_FB_MSM_MDP303
104static struct mdp_dma_data dma_e_data;
105#endif
106#endif
107static struct mdp_dma_data dma3_data;
108
109extern ktime_t mdp_dma2_last_update_time;
110
111extern uint32 mdp_dma2_update_time_in_usec;
112extern int mdp_lcd_rd_cnt_offset_slow;
113extern int mdp_lcd_rd_cnt_offset_fast;
114extern int mdp_usec_diff_threshold;
115
116#ifdef CONFIG_FB_MSM_LCDC
117extern int first_pixel_start_x;
118extern int first_pixel_start_y;
119#endif
120
121#ifdef MSM_FB_ENABLE_DBGFS
122struct dentry *mdp_dir;
123#endif
124
125#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
126static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
127#else
128#define mdp_suspend NULL
129#endif
130
131struct timeval mdp_dma2_timeval;
132struct timeval mdp_ppp_timeval;
133
134#ifdef CONFIG_HAS_EARLYSUSPEND
135static struct early_suspend early_suspend;
136#endif
137
138static u32 mdp_irq;
139
140static uint32 mdp_prim_panel_type = NO_PANEL;
141#ifndef CONFIG_FB_MSM_MDP22
142DEFINE_MUTEX(mdp_lut_push_sem);
143static int mdp_lut_i;
144static int mdp_lut_hw_update(struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700145{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 int i;
147 u16 *c[3];
148 u16 r, g, b;
Pavel Machekd480ace2009-09-22 16:47:03 -0700149
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700150 c[0] = cmap->green;
151 c[1] = cmap->blue;
152 c[2] = cmap->red;
Pavel Machekd480ace2009-09-22 16:47:03 -0700153
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700154 for (i = 0; i < cmap->len; i++) {
155 if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
156 copy_from_user(&g, cmap->green++, sizeof(g)) ||
157 copy_from_user(&b, cmap->blue++, sizeof(b)))
158 return -EFAULT;
159
160#ifdef CONFIG_FB_MSM_MDP40
161 MDP_OUTP(MDP_BASE + 0x94800 +
162#else
163 MDP_OUTP(MDP_BASE + 0x93800 +
164#endif
165 (0x400*mdp_lut_i) + cmap->start*4 + i*4,
166 ((g & 0xff) |
167 ((b & 0xff) << 8) |
168 ((r & 0xff) << 16)));
Pavel Machekd480ace2009-09-22 16:47:03 -0700169 }
170
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700172}
173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174static int mdp_lut_push;
175static int mdp_lut_push_i;
176static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
Pavel Machekd480ace2009-09-22 16:47:03 -0700177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 int ret;
179
180 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
181 ret = mdp_lut_hw_update(cmap);
182 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
183
184 if (ret)
185 return ret;
186
187 mutex_lock(&mdp_lut_push_sem);
188 mdp_lut_push = 1;
189 mdp_lut_push_i = mdp_lut_i;
190 mutex_unlock(&mdp_lut_push_sem);
191
192 mdp_lut_i = (mdp_lut_i + 1)%2;
193
194 return 0;
195}
196
197static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
198{
199 int ret;
200
201 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
202 ret = mdp_lut_hw_update(cmap);
203
204 if (ret) {
205 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
206 return ret;
Pavel Machekd480ace2009-09-22 16:47:03 -0700207 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208
209 MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
210 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
211 mdp_lut_i = (mdp_lut_i + 1)%2;
212
213 return 0;
214}
215
216static void mdp_lut_enable(void)
217{
218 if (mdp_lut_push) {
219 mutex_lock(&mdp_lut_push_sem);
220 mdp_lut_push = 0;
221 MDP_OUTP(MDP_BASE + 0x90070,
222 (mdp_lut_push_i << 10) | 0x17);
223 mutex_unlock(&mdp_lut_push_sem);
224 }
225}
226
Ravishangar Kalyaname7833e22011-07-22 16:20:19 -0700227#define MDP_REV42_HIST_MAX_BIN 128
228#define MDP_REV41_HIST_MAX_BIN 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229
230#ifdef CONFIG_FB_MSM_MDP40
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700231unsigned int mdp_hist_frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232struct completion mdp_hist_comp;
233boolean mdp_is_hist_start = FALSE;
234#else
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700235static unsigned int mdp_hist_frame_cnt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236static struct completion mdp_hist_comp;
237static boolean mdp_is_hist_start = FALSE;
238#endif
239static DEFINE_MUTEX(mdp_hist_mutex);
Carl Vanderlipb2408032011-10-27 10:52:48 -0700240static boolean mdp_is_hist_data = FALSE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241
Carl Vanderlipb2408032011-10-27 10:52:48 -0700242/*should hold mdp_hist_mutex before calling this function*/
243int _mdp_histogram_ctrl(boolean en)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700244{
245 unsigned long flag;
Carl Vanderlip13f48ed2011-10-27 13:44:31 -0700246 unsigned long hist_base;
Carl Vanderlip4c4c2862011-10-27 11:28:55 -0700247 uint32_t status;
Carl Vanderlip13f48ed2011-10-27 13:44:31 -0700248
249 if (mdp_rev >= MDP_REV_40)
250 hist_base = 0x95000;
251 else
252 hist_base = 0x94000;
253
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700254 if (en == TRUE) {
Carl Vanderlipb2408032011-10-27 10:52:48 -0700255 if (mdp_is_hist_start)
256 return -EINVAL;
257
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700258 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
259 mdp_hist_frame_cnt = 1;
260 mdp_enable_irq(MDP_HISTOGRAM_TERM);
261 spin_lock_irqsave(&mdp_spin_lock, flag);
262 if (mdp_is_hist_start == FALSE && mdp_rev >= MDP_REV_40) {
263 MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
264 MDP_OUTP(MDP_BASE + hist_base + 0x1c, INTR_HIST_DONE);
265 }
266 spin_unlock_irqrestore(&mdp_spin_lock, flag);
267 MDP_OUTP(MDP_BASE + hist_base + 0x4, mdp_hist_frame_cnt);
268 MDP_OUTP(MDP_BASE + hist_base, 1);
269 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
Carl Vanderlipb2408032011-10-27 10:52:48 -0700270 mdp_is_hist_data = TRUE;
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700271 } else {
Carl Vanderlipb2408032011-10-27 10:52:48 -0700272 if (!mdp_is_hist_start && !mdp_is_hist_data)
273 return -EINVAL;
274
275 mdp_is_hist_data = FALSE;
276 complete(&mdp_hist_comp);
277
Carl Vanderlip4c4c2862011-10-27 11:28:55 -0700278 if (mdp_rev >= MDP_REV_40) {
279 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
280 status = inpdw(MDP_BASE + hist_base + 0x1C);
281 status &= ~INTR_HIST_DONE;
282 MDP_OUTP(MDP_BASE + hist_base + 0x1C, status);
283
284 MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE);
285 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
286 FALSE);
287 }
288
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700289 mdp_disable_irq(MDP_HISTOGRAM_TERM);
Pavel Machekd480ace2009-09-22 16:47:03 -0700290 }
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700291
Pavel Machekd480ace2009-09-22 16:47:03 -0700292 return 0;
293}
294
Carl Vanderlipb2408032011-10-27 10:52:48 -0700295int mdp_histogram_ctrl(boolean en)
296{
297 int ret = 0;
298 mutex_lock(&mdp_hist_mutex);
299 ret = _mdp_histogram_ctrl(en);
300 mutex_unlock(&mdp_hist_mutex);
301 return ret;
302}
303
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700304int mdp_start_histogram(struct fb_info *info)
Pavel Machekd480ace2009-09-22 16:47:03 -0700305{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700306 unsigned long flag;
Pavel Machekd480ace2009-09-22 16:47:03 -0700307
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700308 int ret = 0;
309 mutex_lock(&mdp_hist_mutex);
310 if (mdp_is_hist_start == TRUE) {
311 printk(KERN_ERR "%s histogram already started\n", __func__);
312 ret = -EPERM;
313 goto mdp_hist_start_err;
314 }
315
Carl Vanderlipb2408032011-10-27 10:52:48 -0700316 ret = _mdp_histogram_ctrl(TRUE);
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318 spin_lock_irqsave(&mdp_spin_lock, flag);
319 mdp_is_hist_start = TRUE;
320 spin_unlock_irqrestore(&mdp_spin_lock, flag);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700321
322mdp_hist_start_err:
323 mutex_unlock(&mdp_hist_mutex);
324 return ret;
325
326}
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700327
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700328int mdp_stop_histogram(struct fb_info *info)
329{
330 unsigned long flag;
331 int ret = 0;
332 mutex_lock(&mdp_hist_mutex);
333 if (!mdp_is_hist_start) {
334 printk(KERN_ERR "%s histogram already stopped\n", __func__);
335 ret = -EPERM;
336 goto mdp_hist_stop_err;
337 }
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700338
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700339 spin_lock_irqsave(&mdp_spin_lock, flag);
340 mdp_is_hist_start = FALSE;
341 spin_unlock_irqrestore(&mdp_spin_lock, flag);
Carl Vanderlipe19a2862011-10-27 11:26:24 -0700342
Carl Vanderlipb2408032011-10-27 10:52:48 -0700343 ret = _mdp_histogram_ctrl(FALSE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700344
345mdp_hist_stop_err:
346 mutex_unlock(&mdp_hist_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700347 return ret;
348}
349
Carl Vanderlipb2408032011-10-27 10:52:48 -0700350/*call from within mdp_hist_mutex*/
351static int _mdp_copy_hist_data(struct mdp_histogram *hist)
Pavel Machekd480ace2009-09-22 16:47:03 -0700352{
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700353 char *mdp_hist_base;
354 uint32 r_data_offset = 0x100, g_data_offset = 0x200;
355 uint32 b_data_offset = 0x300;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356 int ret = 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700357
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700358 if (mdp_rev >= MDP_REV_42) {
359 mdp_hist_base = MDP_BASE + 0x95000;
360 r_data_offset = 0x400;
361 g_data_offset = 0x800;
362 b_data_offset = 0xc00;
363 } else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_41) {
364 mdp_hist_base = MDP_BASE + 0x95000;
365 } else if (mdp_rev >= MDP_REV_30 && mdp_rev <= MDP_REV_31) {
366 mdp_hist_base = MDP_BASE + 0x94000;
367 } else {
368 pr_err("%s(): Unsupported MDP rev %u\n", __func__, mdp_rev);
369 return -EPERM;
370 }
371
372 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
373 if (hist->r) {
374 ret = copy_to_user(hist->r, mdp_hist_base + r_data_offset,
375 hist->bin_cnt * 4);
376 if (ret)
377 goto hist_err;
378 }
379 if (hist->g) {
380 ret = copy_to_user(hist->g, mdp_hist_base + g_data_offset,
381 hist->bin_cnt * 4);
382 if (ret)
383 goto hist_err;
384 }
385 if (hist->b) {
386 ret = copy_to_user(hist->b, mdp_hist_base + b_data_offset,
387 hist->bin_cnt * 4);
388 if (ret)
389 goto hist_err;
390 }
391
392 if (mdp_is_hist_start == TRUE) {
393 MDP_OUTP(mdp_hist_base + 0x004,
394 mdp_hist_frame_cnt);
395 MDP_OUTP(mdp_hist_base, 1);
396 }
397 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700398 return 0;
399
400hist_err:
401 printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
402 return ret;
403}
404
static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	/*
	 * Blocking histogram read: validate the request, wait for the ISR
	 * to signal a completed collection, then copy the bins out.
	 * Returns -EINVAL on bad arguments or when collection is torn down
	 * while waiting, -EPERM if not started.
	 */
	int ret = 0;

	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	/* bin_cnt is bounded per hardware revision */
	if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
		|| (mdp_rev == MDP_REV_42 &&
		hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_data) {
		ret = -EINVAL;
		goto error;
	}

	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram not started\n", __func__);
		ret = -EPERM;
		goto error;
	}

	INIT_COMPLETION(mdp_hist_comp);
	mdp_hist_frame_cnt = hist->frame_cnt;
	/* drop the mutex while blocked so _mdp_histogram_ctrl() can run */
	mutex_unlock(&mdp_hist_mutex);

	/* completed by the ISR on HIST_DONE, or by a disable to unblock us */
	wait_for_completion_killable(&mdp_hist_comp);

	mutex_lock(&mdp_hist_mutex);
	/* only copy if collection wasn't torn down while we waited */
	if (mdp_is_hist_data)
		ret = _mdp_copy_hist_data(hist);
error:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}
442#endif
443
444/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
445
446int mdp_ppp_pipe_wait(void)
447{
448 int ret = 1;
449
450 /* wait 5 seconds for the operation to complete before declaring
451 the MDP hung */
452
453 if (mdp_ppp_waiting == TRUE) {
454 ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
455 5 * HZ);
456
457 if (!ret)
458 printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
459 __func__);
Pavel Machekd480ace2009-09-22 16:47:03 -0700460 }
461
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700462 return ret;
463}
Pavel Machekd480ace2009-09-22 16:47:03 -0700464
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465static DEFINE_SPINLOCK(mdp_lock);
466static int mdp_irq_mask;
467static int mdp_irq_enabled;
Pavel Machekd480ace2009-09-22 16:47:03 -0700468
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700469/*
470 * mdp_enable_irq: can not be called from isr
471 */
472void mdp_enable_irq(uint32 term)
473{
474 unsigned long irq_flags;
475
476 spin_lock_irqsave(&mdp_lock, irq_flags);
477 if (mdp_irq_mask & term) {
478 printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
479 __func__, term, mdp_irq_mask, mdp_irq_enabled);
480 } else {
481 mdp_irq_mask |= term;
482 if (mdp_irq_mask && !mdp_irq_enabled) {
483 mdp_irq_enabled = 1;
484 enable_irq(mdp_irq);
485 }
486 }
Pavel Machekd480ace2009-09-22 16:47:03 -0700487 spin_unlock_irqrestore(&mdp_lock, irq_flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700488}
489
490/*
491 * mdp_disable_irq: can not be called from isr
492 */
493void mdp_disable_irq(uint32 term)
494{
495 unsigned long irq_flags;
496
497 spin_lock_irqsave(&mdp_lock, irq_flags);
498 if (!(mdp_irq_mask & term)) {
499 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
500 __func__, term, mdp_irq_mask, mdp_irq_enabled);
501 } else {
502 mdp_irq_mask &= ~term;
503 if (!mdp_irq_mask && mdp_irq_enabled) {
504 mdp_irq_enabled = 0;
505 disable_irq(mdp_irq);
506 }
507 }
508 spin_unlock_irqrestore(&mdp_lock, irq_flags);
509}
510
511void mdp_disable_irq_nosync(uint32 term)
512{
513 spin_lock(&mdp_lock);
514 if (!(mdp_irq_mask & term)) {
515 printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
516 __func__, term, mdp_irq_mask, mdp_irq_enabled);
517 } else {
518 mdp_irq_mask &= ~term;
519 if (!mdp_irq_mask && mdp_irq_enabled) {
520 mdp_irq_enabled = 0;
521 disable_irq_nosync(mdp_irq);
522 }
523 }
524 spin_unlock(&mdp_lock);
525}
526
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/*
	 * Start the hardware engine selected by `term`.  The PPP path is
	 * synchronous (blocks until the ISR completes mdp_ppp_comp); the
	 * DMA/overlay paths just write the start register and return.
	 * Register offsets differ per MDP generation, hence the #ifdefs.
	 */

	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		/* write start bit, then block until PPP-done interrupt */
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* latch any pending LUT bank switch with this frame */
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		/* MDP303 command-mode panels need a DSI trigger as well */
		mipi_dsi_cmd_mdp_start();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
614static int mdp_clk_rate;
615static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
616static int pdev_list_cnt;
617
/*
 * Delayed-work handler: issues the deferred master power-off requested by
 * mdp_pipe_ctrl() once all blocks have been idle for mdp_timer_duration.
 */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	/*
	 * Per-block power refcounting plus global MDP clock gating.
	 * POWER_ON increments the block's counter; POWER_OFF decrements it.
	 * When every counter reaches zero the MDP core/pixel/LUT clocks are
	 * released (immediately for the master block or during suspend,
	 * otherwise via a delayed work item); when any counter becomes
	 * non-zero while clocks are off, they are re-enabled.
	 */
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			 * Master has to serve a request to power off MDP always
			 * It also has a timer to power off. So, in case of
			 * timer expires first and DMA2 finishes later,
			 * master has to power off two times
			 * There shouldn't be multiple power-off request for
			 * other blocks
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run. queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					/* remember the rate so it can be
					 * restored on the next clk-on */
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					/* early MDP4 silicon: drop the rate
					 * cap while the clock is gated */
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				/* restore the rate saved at clk-off time */
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
783
784#ifndef CONFIG_FB_MSM_MDP40
785irqreturn_t mdp_isr(int irq, void *ptr)
786{
787 uint32 mdp_interrupt = 0;
788 struct mdp_dma_data *dma;
789
790 mdp_is_in_isr = TRUE;
791 do {
792 mdp_interrupt = inp32(MDP_INTR_STATUS);
793 outp32(MDP_INTR_CLEAR, mdp_interrupt);
794
795 mdp_interrupt &= mdp_intr_mask;
796
797 if (mdp_interrupt & TV_ENC_UNDERRUN) {
798 mdp_interrupt &= ~(TV_ENC_UNDERRUN);
799 mdp_tv_underflow_cnt++;
800 }
801
802 if (!mdp_interrupt)
803 break;
804
805 /* DMA3 TV-Out Start */
806 if (mdp_interrupt & TV_OUT_DMA3_START) {
807 /* let's disable TV out interrupt */
808 mdp_intr_mask &= ~TV_OUT_DMA3_START;
809 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
810
811 dma = &dma3_data;
812 if (dma->waiting) {
813 dma->waiting = FALSE;
814 complete(&dma->comp);
815 }
816 }
817#ifndef CONFIG_FB_MSM_MDP22
818 if (mdp_interrupt & MDP_HIST_DONE) {
819 outp32(MDP_BASE + 0x94018, 0x3);
820 outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700821 complete(&mdp_hist_comp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700822 }
823
824 /* LCDC UnderFlow */
825 if (mdp_interrupt & LCDC_UNDERFLOW) {
826 mdp_lcdc_underflow_cnt++;
827 /*when underflow happens HW resets all the histogram
828 registers that were set before so restore them back
829 to normal.*/
830 MDP_OUTP(MDP_BASE + 0x94010, 1);
831 MDP_OUTP(MDP_BASE + 0x9401c, 2);
832 if (mdp_is_hist_start == TRUE) {
833 MDP_OUTP(MDP_BASE + 0x94004,
Ravishangar Kalyanam8fef09a2011-08-09 17:36:23 -0700834 mdp_hist_frame_cnt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700835 MDP_OUTP(MDP_BASE + 0x94000, 1);
836 }
837 }
838 /* LCDC Frame Start */
839 if (mdp_interrupt & LCDC_FRAME_START) {
840 /* let's disable LCDC interrupt */
841 mdp_intr_mask &= ~LCDC_FRAME_START;
842 outp32(MDP_INTR_ENABLE, mdp_intr_mask);
843
844 dma = &dma2_data;
845 if (dma->waiting) {
846 dma->waiting = FALSE;
847 complete(&dma->comp);
848 }
849 }
850
851 /* DMA2 LCD-Out Complete */
852 if (mdp_interrupt & MDP_DMA_S_DONE) {
853 dma = &dma_s_data;
854 dma->busy = FALSE;
855 mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
856 TRUE);
857 complete(&dma->comp);
858 }
859 /* DMA_E LCD-Out Complete */
860 if (mdp_interrupt & MDP_DMA_E_DONE) {
861 dma = &dma_s_data;
862 dma->busy = FALSE;
863 mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
864 TRUE);
865 complete(&dma->comp);
866 }
867
868#endif
869
870 /* DMA2 LCD-Out Complete */
871 if (mdp_interrupt & MDP_DMA_P_DONE) {
872 struct timeval now;
873
874 mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
875 mdp_dma2_last_update_time);
876 if (mdp_debug[MDP_DMA2_BLOCK]) {
877 jiffies_to_timeval(jiffies, &now);
878 mdp_dma2_timeval.tv_usec =
879 now.tv_usec - mdp_dma2_timeval.tv_usec;
880 }
881#ifndef CONFIG_FB_MSM_MDP303
882 dma = &dma2_data;
883 dma->busy = FALSE;
884 mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
885 TRUE);
886 complete(&dma->comp);
887#else
888 if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
889 dma = &dma2_data;
890 dma->busy = FALSE;
891 mdp_pipe_ctrl(MDP_DMA2_BLOCK,
892 MDP_BLOCK_POWER_OFF, TRUE);
893 complete(&dma->comp);
894 }
895#endif
896 }
897 /* PPP Complete */
898 if (mdp_interrupt & MDP_PPP_DONE) {
899#ifdef CONFIG_FB_MSM_MDP31
900 MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
901#endif
902 mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
903 if (mdp_ppp_waiting) {
904 mdp_ppp_waiting = FALSE;
905 complete(&mdp_ppp_comp);
906 }
907 }
908 } while (1);
909
910 mdp_is_in_isr = FALSE;
911
Pavel Machekd480ace2009-09-22 16:47:03 -0700912 return IRQ_HANDLED;
913}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700914#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700915
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700916static void mdp_drv_init(void)
Pavel Machekd480ace2009-09-22 16:47:03 -0700917{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700918 int i;
Pavel Machekd480ace2009-09-22 16:47:03 -0700919
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 for (i = 0; i < MDP_MAX_BLOCK; i++) {
921 mdp_debug[i] = 0;
Pavel Machekd480ace2009-09-22 16:47:03 -0700922 }
923
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700924 /* initialize spin lock and workqueue */
925 spin_lock_init(&mdp_spin_lock);
926 mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
927 mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
928 mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
929 INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
930 mdp_pipe_ctrl_workqueue_handler);
Pavel Machekd480ace2009-09-22 16:47:03 -0700931
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700932 /* initialize semaphore */
933 init_completion(&mdp_ppp_comp);
934 sema_init(&mdp_ppp_mutex, 1);
935 sema_init(&mdp_pipe_ctrl_mutex, 1);
Pavel Machekd480ace2009-09-22 16:47:03 -0700936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 dma2_data.busy = FALSE;
938 dma2_data.dmap_busy = FALSE;
939 dma2_data.waiting = FALSE;
940 init_completion(&dma2_data.comp);
941 init_completion(&dma2_data.dmap_comp);
942 sema_init(&dma2_data.mutex, 1);
943 mutex_init(&dma2_data.ov_mutex);
Pavel Machekd480ace2009-09-22 16:47:03 -0700944
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700945 dma3_data.busy = FALSE;
946 dma3_data.waiting = FALSE;
947 init_completion(&dma3_data.comp);
948 sema_init(&dma3_data.mutex, 1);
Pavel Machekd480ace2009-09-22 16:47:03 -0700949
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700950 dma_s_data.busy = FALSE;
951 dma_s_data.waiting = FALSE;
952 init_completion(&dma_s_data.comp);
953 sema_init(&dma_s_data.mutex, 1);
Pavel Machekd480ace2009-09-22 16:47:03 -0700954
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700955#ifndef CONFIG_FB_MSM_MDP303
956 dma_e_data.busy = FALSE;
957 dma_e_data.waiting = FALSE;
958 init_completion(&dma_e_data.comp);
959 mutex_init(&dma_e_data.ov_mutex);
960#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700961
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962#ifndef CONFIG_FB_MSM_MDP22
963 init_completion(&mdp_hist_comp);
964#endif
Pavel Machekd480ace2009-09-22 16:47:03 -0700965
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700966 /* initializing mdp power block counter to 0 */
967 for (i = 0; i < MDP_MAX_BLOCK; i++) {
968 atomic_set(&mdp_block_power_cnt[i], 0);
Pavel Machekd480ace2009-09-22 16:47:03 -0700969 }
970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971#ifdef MSM_FB_ENABLE_DBGFS
972 {
973 struct dentry *root;
974 char sub_name[] = "mdp";
Pavel Machekd480ace2009-09-22 16:47:03 -0700975
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700976 root = msm_fb_get_debugfs_root();
977 if (root != NULL) {
978 mdp_dir = debugfs_create_dir(sub_name, root);
979
980 if (mdp_dir) {
981 msm_fb_debugfs_file_create(mdp_dir,
982 "dma2_update_time_in_usec",
983 (u32 *) &mdp_dma2_update_time_in_usec);
984 msm_fb_debugfs_file_create(mdp_dir,
985 "vs_rdcnt_slow",
986 (u32 *) &mdp_lcd_rd_cnt_offset_slow);
987 msm_fb_debugfs_file_create(mdp_dir,
988 "vs_rdcnt_fast",
989 (u32 *) &mdp_lcd_rd_cnt_offset_fast);
990 msm_fb_debugfs_file_create(mdp_dir,
991 "mdp_usec_diff_threshold",
992 (u32 *) &mdp_usec_diff_threshold);
993 msm_fb_debugfs_file_create(mdp_dir,
994 "mdp_current_clk_on",
995 (u32 *) &mdp_current_clk_on);
996#ifdef CONFIG_FB_MSM_LCDC
997 msm_fb_debugfs_file_create(mdp_dir,
998 "lcdc_start_x",
999 (u32 *) &first_pixel_start_x);
1000 msm_fb_debugfs_file_create(mdp_dir,
1001 "lcdc_start_y",
1002 (u32 *) &first_pixel_start_y);
1003#endif
1004 }
Pavel Machekd480ace2009-09-22 16:47:03 -07001005 }
Pavel Machekd480ace2009-09-22 16:47:03 -07001006 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001007#endif
1008}
1009
/* forward declarations for the platform_driver table below */
static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);
1012
/* Runtime-PM suspend hook: no work needed, clock gating is driven by
 * mdp_pipe_ctrl(); this only logs the transition. Always succeeds. */
static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001019static int mdp_runtime_resume(struct device *dev)
Pavel Machekd480ace2009-09-22 16:47:03 -07001020{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001021 dev_dbg(dev, "pm_runtime: resuming...\n");
Pavel Machekd480ace2009-09-22 16:47:03 -07001022 return 0;
Pavel Machekd480ace2009-09-22 16:47:03 -07001023}
1024
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001025static struct dev_pm_ops mdp_dev_pm_ops = {
1026 .runtime_suspend = mdp_runtime_suspend,
1027 .runtime_resume = mdp_runtime_resume,
Pavel Machekd480ace2009-09-22 16:47:03 -07001028};
1029
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001030
/*
 * Platform driver for the "mdp" device.  When CONFIG_HAS_EARLYSUSPEND is
 * set, suspend is handled via the early-suspend callbacks registered in
 * mdp_register_driver() instead of the legacy .suspend hook.
 */
static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.pm = &mdp_dev_pm_ops,
	},
};
1048
/*
 * Panel-off hook installed on the msm_fb data chain (pdata->off).
 * Disables histogram collection first, then holds the MDP command block
 * powered around the downstream panel-off call so register accesses made
 * by the panel driver are safe.  Returns the panel driver's status.
 */
static int mdp_off(struct platform_device *pdev)
{
	int ret = 0;
	mdp_histogram_ctrl(FALSE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_off(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}
1060
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001061static int mdp_on(struct platform_device *pdev)
1062{
1063 int ret = 0;
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001065#ifdef CONFIG_FB_MSM_MDP40
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001066 struct msm_fb_data_type *mfd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001067 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1068 if (is_mdp4_hw_reset()) {
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001069 mfd = platform_get_drvdata(pdev);
1070 mdp_vsync_cfg_regs(mfd, FALSE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001071 mdp4_hw_init();
1072 outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
1073 }
1074 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1075#endif
1076 mdp_histogram_ctrl(TRUE);
1077
1078 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
1079 ret = panel_next_on(pdev);
1080 mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
1081 return ret;
1082}
1083
/* set once the MDP MMIO region, IRQ and clocks have been brought up */
static int mdp_resource_initialized;
/* board-supplied platform data; may be NULL if probe never saw resources */
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;

/*
 * mdp_hw_revision:
 * 0 == V1
 * 1 == V2
 * 2 == V2.1
 *
 */
1096void mdp_hw_version(void)
1097{
1098 char *cp;
1099 uint32 *hp;
1100
1101 if (mdp_pdata == NULL)
1102 return;
1103
1104 mdp_hw_revision = MDP4_REVISION_NONE;
1105 if (mdp_pdata->hw_revision_addr == 0)
1106 return;
1107
1108 /* tlmmgpio2 shadow */
1109 cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);
1110
1111 if (cp == NULL)
1112 return;
1113
1114 hp = (uint32 *)cp; /* HW_REVISION_NUMBER */
1115 mdp_hw_revision = *hp;
1116 iounmap(cp);
1117
1118 mdp_hw_revision >>= 28; /* bit 31:28 */
1119 mdp_hw_revision &= 0x0f;
1120
1121 MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
1122 __func__, mdp_hw_revision);
1123}
1124
kuogee hsieh5a7f32c2011-08-31 17:51:34 -07001125int mdp4_writeback_offset(void)
1126{
1127 int off = 0;
1128
1129 if (mdp_pdata->writeback_offset)
1130 off = mdp_pdata->writeback_offset();
1131
1132 pr_debug("%s: writeback_offset=%d %x\n", __func__, off, off);
1133
1134 return off;
1135}
1136
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001137#ifdef CONFIG_FB_MSM_MDP40
/*
 * Raise the MDP core clock to at least min_clk_rate and patch the
 * board's per-perf-level clock table so that no entry is below the rate
 * actually achieved.  Used by panel paths (DSI command mode, LCDC) whose
 * pixel clock imposes a floor on the MDP core rate.  No-op when the
 * clock or the table is absent.
 */
static void configure_mdp_core_clk_table(uint32 min_clk_rate)
{
	uint8 count;
	uint32 current_rate;
	if (mdp_clk && mdp_pdata && mdp_pdata->mdp_core_clk_table) {
		/* let the clock framework pick the nearest supported rate */
		min_clk_rate = clk_round_rate(mdp_clk, min_clk_rate);
		if (clk_set_rate(mdp_clk, min_clk_rate) < 0)
			printk(KERN_ERR "%s: clk_set_rate failed\n",
							__func__);
		else {
			count = 0;
			current_rate = clk_get_rate(mdp_clk);
			/* bump every table entry below the achieved rate */
			while (count < mdp_pdata->num_mdp_clk) {
				if (mdp_pdata->mdp_core_clk_table[count]
						< current_rate) {
					mdp_pdata->
					mdp_core_clk_table[count] =
							current_rate;
				}
				count++;
			}
		}
	}
}
1162#endif
1163
1164#ifdef CONFIG_MSM_BUS_SCALING
1165static uint32_t mdp_bus_scale_handle;
1166int mdp_bus_scale_update_request(uint32_t index)
1167{
1168 if (!mdp_pdata && (!mdp_pdata->mdp_bus_scale_table
1169 || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1))) {
1170 printk(KERN_ERR "%s invalid table or index\n", __func__);
1171 return -EINVAL;
1172 }
1173 if (mdp_bus_scale_handle < 1) {
1174 printk(KERN_ERR "%s invalid bus handle\n", __func__);
1175 return -EINVAL;
1176 }
1177 return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
1178 index);
1179}
1180#endif
/* serializes reads/writes of the MDP core (and LUT) clock rate */
DEFINE_MUTEX(mdp_clk_lock);
/*
 * Set the MDP core clock according to a performance level.  Levels are
 * inverted indices into mdp_core_clk_table: level 1 selects the last
 * (fastest) entry, num_mdp_clk selects the first.  Returns the
 * clk_set_rate() status, or -EINVAL when the clock/table is unavailable
 * or the level is rejected.
 *
 * NOTE(review): perf_level == 0 passes the '> num_mdp_clk' check yet
 * indexes table[num_mdp_clk], one past the end — presumably callers
 * always pass levels >= 1; confirm against call sites.
 */
int mdp_set_core_clk(uint16 perf_level)
{
	int ret = -EINVAL;
	if (mdp_clk && mdp_pdata
		 && mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk)
			printk(KERN_ERR "%s invalid perf level\n", __func__);
		else {
			mutex_lock(&mdp_clk_lock);
			ret = clk_set_rate(mdp_clk,
				mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
						- perf_level]);
			mutex_unlock(&mdp_clk_lock);
			if (ret) {
				printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
					__func__);
			}
		}
	}
	return ret;
}
1204
1205unsigned long mdp_get_core_clk(void)
1206{
1207 unsigned long clk_rate = 0;
1208 if (mdp_clk) {
1209 mutex_lock(&mdp_clk_lock);
1210 clk_rate = clk_get_rate(mdp_clk);
1211 mutex_unlock(&mdp_clk_lock);
1212 }
1213
1214 return clk_rate;
1215}
1216
/*
 * Translate a performance level to the clock rate it would select from
 * mdp_core_clk_table (same inverted-index scheme as mdp_set_core_clk).
 * Falls back to the currently running rate when the table is absent or
 * the level is out of range.
 *
 * NOTE(review): as in mdp_set_core_clk(), perf_level == 0 would index
 * one entry past the table end — confirm callers pass levels >= 1.
 */
unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
{
	unsigned long clk_rate = 0;

	if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk) {
			printk(KERN_ERR "%s invalid perf level\n", __func__);
			clk_rate = mdp_get_core_clk();
		} else {
			clk_rate = mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
					- perf_level];
		}
	} else
		clk_rate = mdp_get_core_clk();

	return clk_rate;
}
1235
1236static int mdp_irq_clk_setup(void)
1237{
1238 int ret;
1239
1240#ifdef CONFIG_FB_MSM_MDP40
1241 ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
1242#else
1243 ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
1244#endif
1245 if (ret) {
1246 printk(KERN_ERR "mdp request_irq() failed!\n");
1247 return ret;
1248 }
1249 disable_irq(mdp_irq);
1250
1251 footswitch = regulator_get(NULL, "fs_mdp");
1252 if (IS_ERR(footswitch))
1253 footswitch = NULL;
Ravishangar Kalyanam419051b2011-08-31 19:07:53 -07001254 else
1255 regulator_enable(footswitch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001256
1257 mdp_clk = clk_get(NULL, "mdp_clk");
1258 if (IS_ERR(mdp_clk)) {
1259 ret = PTR_ERR(mdp_clk);
1260 printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
1261 free_irq(mdp_irq, 0);
1262 return ret;
1263 }
1264
1265 mdp_pclk = clk_get(NULL, "mdp_pclk");
1266 if (IS_ERR(mdp_pclk))
1267 mdp_pclk = NULL;
1268
1269 if (mdp_rev == MDP_REV_42) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001270 mdp_lut_clk = clk_get(NULL, "lut_mdp");
1271 if (IS_ERR(mdp_lut_clk)) {
1272 ret = PTR_ERR(mdp_lut_clk);
1273 pr_err("can't get mdp_clk error:%d!\n", ret);
1274 clk_put(mdp_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001275 free_irq(mdp_irq, 0);
1276 return ret;
1277 }
1278 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279 mdp_lut_clk = NULL;
1280 }
1281
1282#ifdef CONFIG_FB_MSM_MDP40
1283 /*
1284 * mdp_clk should greater than mdp_pclk always
1285 */
1286 if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
1287 mutex_lock(&mdp_clk_lock);
1288 clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
1289 if (mdp_lut_clk != NULL)
1290 clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
1291 mutex_unlock(&mdp_clk_lock);
1292 }
1293 MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
1294#endif
1295 return 0;
1296}
1297
/*
 * Probe for the "mdp" platform devices.  This is called twice per setup:
 *
 *  1. For the MDP core device (id 0, with MMIO/IRQ resources): maps the
 *     register space, brings up IRQ/clocks, reads the HW revision and
 *     runs the one-time hardware init, then sets
 *     mdp_resource_initialized.
 *
 *  2. For each panel device chained behind it: allocates a child
 *     "msm_fb" device, copies the panel data onto it, installs the
 *     mdp_on/mdp_off hooks, and configures the per-panel-type DMA
 *     function, DMA channel, cursor/LUT/histogram hooks and MDP4
 *     interface mux before registering the framebuffer device.
 *
 * Returns 0 on success; -EPERM if a panel probes before the core
 * device, or a negative errno on any setup failure.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size ;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	/* core MDP device: id 0 carries the MMIO/IRQ resources */
	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size =  resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		if (mdp_irq < 0) {
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif

		/* panel devices probed after this point are accepted */
		mdp_resource_initialized = 1;
		return 0;
	}

	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;
	mfd->mdp_rev = mdp_rev;

	mfd->ov0_blt_state  = 0;
	mfd->use_ov0_blt = 0 ;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	/* per-panel-type DMA/cursor/LUT/interface configuration */
	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		/* MDP303: no overlay engine, direct DMA update path */
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		if (mdp_rev >= MDP_REV_40)
			mfd->cursor_update = mdp_hw_cursor_sync_update;
		else
			mfd->cursor_update = mdp_hw_cursor_update;
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		/* DSI cmd mode needs core clk >= 1.5x the DSI pixel clock */
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mfd->lut_update = mdp_lut_update_nonlcdc;
		mfd->do_histogram = mdp_do_histogram;
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		/* LCDC needs core clk >= 1.15x the panel pixel clock */
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
								* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
	case WRITEBACK_PANEL:
		pdata->on = mdp4_overlay_writeback_on;
		pdata->off = mdp4_overlay_writeback_off;
		mfd->dma_fnc = mdp4_writeback_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	/* cache the interface mux so mdp_on() can restore it after reset */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	/* register the bus-scale client once, on the first panel probed */
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif
	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

      mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}
1646
1647#ifdef CONFIG_PM
/*
 * Common suspend path shared by legacy suspend and early-suspend.
 * Stops the delayed pipe-ctrl worker, waits for in-flight PPP work,
 * powers the MDP master block down and marks the driver suspended so
 * mdp_pipe_ctrl() refuses new power-ups.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* in case the worker could not be cancelled, drain its queue */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
1667#endif
1668
1669#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
1670static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
1671{
1672 if (pdev->id == 0) {
1673 mdp_suspend_sub();
1674 if (mdp_current_clk_on) {
1675 printk(KERN_WARNING"MDP suspend failed\n");
1676 return -EBUSY;
1677 }
1678 }
1679
1680 return 0;
1681}
1682#endif
1683
1684#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * Early-suspend callback: run the common suspend path, blank the DTV
 * output, and drop the MDP footswitch on parts where that is safe
 * (rev > 4.2 only, per the guard below).
 */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
	mdp4_dtv_set_black_screen();
#endif
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_disable(footswitch);
}
1694
/*
 * Early-resume callback: re-enable the footswitch dropped in
 * mdp_early_suspend() and clear the suspended flag so mdp_pipe_ctrl()
 * may power blocks up again.
 */
static void mdp_early_resume(struct early_suspend *h)
{
	if (footswitch && mdp_rev > MDP_REV_42)
		regulator_enable(footswitch);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
1704#endif
1705
/*
 * Platform remove hook: release the footswitch regulator, unmap the MDP
 * register space, disable runtime PM and unregister the bus-scale client.
 * NOTE(review): the IRQ and clocks acquired in mdp_irq_clk_setup() are
 * not released here — presumably the core device is never unbound in
 * practice; confirm before relying on rebind.
 */
static int mdp_remove(struct platform_device *pdev)
{
	if (footswitch != NULL)
		regulator_put(footswitch);
	iounmap(msm_mdp_base);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return 0;
}
1719
/*
 * Register the early-suspend callbacks (one level before the framebuffer
 * is disabled, so MDP suspends first) and then the platform driver.
 * Returns the platform_driver_register() status.
 */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
1731
1732static int __init mdp_driver_init(void)
1733{
1734 int ret;
1735
1736 mdp_drv_init();
1737
1738 ret = mdp_register_driver();
1739 if (ret) {
1740 printk(KERN_ERR "mdp_register_driver() failed!\n");
1741 return ret;
1742 }
1743
1744#if defined(CONFIG_DEBUG_FS)
1745 mdp_debugfs_init();
1746#endif
1747
1748 return 0;
1749
1750}
1751
1752module_init(mdp_driver_init);