/* drivers/video/msm_fb/mdp.c
 *
 * MSM MDP Interface (used by framebuffer core)
 *
 * Copyright (c) 2007-2012, Code Aurora Forum. All rights reserved.
 * Copyright (C) 2007 Google Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <mach/clk.h>
#include "mdp.h"
#include "msm_fb.h"
#ifdef CONFIG_FB_MSM_MDP40
#include "mdp4.h"
#endif
#include "mipi_dsi.h"

uint32 mdp4_extn_disp;

static struct clk *mdp_clk;
static struct clk *mdp_pclk;
static struct clk *mdp_lut_clk;
int mdp_rev;

static struct regulator *footswitch;
static unsigned int mdp_footswitch_on;

struct completion mdp_ppp_comp;
struct semaphore mdp_ppp_mutex;
struct semaphore mdp_pipe_ctrl_mutex;

unsigned long mdp_timer_duration = (HZ/20); /* 50 msec */

boolean mdp_ppp_waiting = FALSE;
uint32 mdp_tv_underflow_cnt;
uint32 mdp_lcdc_underflow_cnt;

boolean mdp_current_clk_on = FALSE;
boolean mdp_is_in_isr = FALSE;

/*
 * legacy mdp_in_processing is only for DMA2-MDDI
 * this applies to DMA2 block only
 */
uint32 mdp_in_processing = FALSE;

#ifdef CONFIG_FB_MSM_MDP40
uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
#else
uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
#endif

MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];

atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];

spinlock_t mdp_spin_lock;
struct workqueue_struct *mdp_dma_wq;	/* mdp dma wq */
struct workqueue_struct *mdp_vsync_wq;	/* mdp vsync wq */

static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp pipe ctrl wq */
static struct delayed_work mdp_pipe_ctrl_worker;

static boolean mdp_suspended = FALSE;
DEFINE_MUTEX(mdp_suspend_mutex);

#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
struct mdp_dma_data dma_s_data;
struct mdp_dma_data dma_e_data;
ulong mdp4_display_intf;
#else
static struct mdp_dma_data dma2_data;
static struct mdp_dma_data dma_s_data;
#ifndef CONFIG_FB_MSM_MDP303
static struct mdp_dma_data dma_e_data;
#endif
#endif

#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
struct mdp_dma_data dma_wb_data;
#endif

static struct mdp_dma_data dma3_data;

extern ktime_t mdp_dma2_last_update_time;

extern uint32 mdp_dma2_update_time_in_usec;
extern int mdp_lcd_rd_cnt_offset_slow;
extern int mdp_lcd_rd_cnt_offset_fast;
extern int mdp_usec_diff_threshold;

extern int first_pixel_start_x;
extern int first_pixel_start_y;

#ifdef MSM_FB_ENABLE_DBGFS
struct dentry *mdp_dir;
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
#else
#define mdp_suspend NULL
#endif

struct timeval mdp_dma2_timeval;
struct timeval mdp_ppp_timeval;

#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend early_suspend;
#endif

static u32 mdp_irq;

static uint32 mdp_prim_panel_type = NO_PANEL;
#ifndef CONFIG_FB_MSM_MDP22
DEFINE_MUTEX(mdp_lut_push_sem);
static int mdp_lut_i;
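
/*
 * The LUT is double buffered: mdp_lut_i selects which of the two
 * 0x400-byte banks the new table is written into, and the update paths
 * below flip the index after programming so the inactive bank is always
 * the one being rewritten.
 */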
static int mdp_lut_hw_update(struct fb_cmap *cmap)
{
	int i;
	u16 *c[3];
	u16 r, g, b;

	c[0] = cmap->green;
	c[1] = cmap->blue;
	c[2] = cmap->red;

	for (i = 0; i < cmap->len; i++) {
		if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
		    copy_from_user(&g, cmap->green++, sizeof(g)) ||
		    copy_from_user(&b, cmap->blue++, sizeof(b)))
			return -EFAULT;

#ifdef CONFIG_FB_MSM_MDP40
		MDP_OUTP(MDP_BASE + 0x94800 +
#else
		MDP_OUTP(MDP_BASE + 0x93800 +
#endif
			(0x400 * mdp_lut_i) + cmap->start * 4 + i * 4,
				((g & 0xff) |
				 ((b & 0xff) << 8) |
				 ((r & 0xff) << 16)));
	}

	return 0;
}

static int mdp_lut_push;
static int mdp_lut_push_i;
static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
{
	int ret;

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = mdp_lut_hw_update(cmap);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	if (ret)
		return ret;

	mutex_lock(&mdp_lut_push_sem);
	mdp_lut_push = 1;
	mdp_lut_push_i = mdp_lut_i;
	mutex_unlock(&mdp_lut_push_sem);

	mdp_lut_i = (mdp_lut_i + 1) % 2;

	return 0;
}

static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
{
	int ret;

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = mdp_lut_hw_update(cmap);

	if (ret) {
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
		return ret;
	}

	MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mdp_lut_i = (mdp_lut_i + 1) % 2;

	return 0;
}

static void mdp_lut_enable(void)
{
	if (mdp_lut_push) {
		mutex_lock(&mdp_lut_push_sem);
		mdp_lut_push = 0;
		MDP_OUTP(MDP_BASE + 0x90070,
				(mdp_lut_push_i << 10) | 0x17);
		mutex_unlock(&mdp_lut_push_sem);
	}
}

#define MDP_REV42_HIST_MAX_BIN 128
#define MDP_REV41_HIST_MAX_BIN 32

#ifdef CONFIG_FB_MSM_MDP40
unsigned int mdp_hist_frame_cnt;
struct completion mdp_hist_comp;
boolean mdp_is_hist_start = FALSE;
#else
static unsigned int mdp_hist_frame_cnt;
static struct completion mdp_hist_comp;
static boolean mdp_is_hist_start = FALSE;
#endif
static DEFINE_MUTEX(mdp_hist_mutex);
static boolean mdp_is_hist_data = FALSE;

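/*
 * Histogram control helpers. _mdp_histogram_ctrl() programs the histogram
 * block (at offset 0x95000 on MDP 4.0+ and 0x94000 on older revisions),
 * enables or disables the MDP_HISTOGRAM_TERM interrupt, and tracks
 * collection state in mdp_is_hist_data/mdp_is_hist_start.
 */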
/* should hold mdp_hist_mutex before calling this function */
int _mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	unsigned long hist_base;
	uint32_t status;

	if (mdp_rev >= MDP_REV_40)
		hist_base = 0x95000;
	else
		hist_base = 0x94000;

	if (en == TRUE) {
		if (mdp_is_hist_start)
			return -EINVAL;

		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_hist_frame_cnt = 1;
		mdp_enable_irq(MDP_HISTOGRAM_TERM);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mdp_is_hist_start == FALSE && mdp_rev >= MDP_REV_40) {
			MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
			MDP_OUTP(MDP_BASE + hist_base + 0x1c, INTR_HIST_DONE);
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		MDP_OUTP(MDP_BASE + hist_base + 0x4, mdp_hist_frame_cnt);
		MDP_OUTP(MDP_BASE + hist_base, 1);
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
		mdp_is_hist_data = TRUE;
	} else {
		if (!mdp_is_hist_start && !mdp_is_hist_data)
			return -EINVAL;

		mdp_is_hist_data = FALSE;
		complete(&mdp_hist_comp);

		if (mdp_rev >= MDP_REV_40) {
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			status = inpdw(MDP_BASE + hist_base + 0x1C);
			status &= ~INTR_HIST_DONE;
			MDP_OUTP(MDP_BASE + hist_base + 0x1C, status);

			MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE);
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
					FALSE);
		}

		mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}

	return 0;
}

int mdp_histogram_ctrl(boolean en)
{
	int ret = 0;
	mutex_lock(&mdp_hist_mutex);
	ret = _mdp_histogram_ctrl(en);
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}

int mdp_start_histogram(struct fb_info *info)
{
	unsigned long flag;
	int ret = 0;

	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_start == TRUE) {
		printk(KERN_ERR "%s histogram already started\n", __func__);
		ret = -EPERM;
		goto mdp_hist_start_err;
	}

	ret = _mdp_histogram_ctrl(TRUE);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_is_hist_start = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

mdp_hist_start_err:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}

int mdp_stop_histogram(struct fb_info *info)
{
	unsigned long flag;
	int ret = 0;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram already stopped\n", __func__);
		ret = -EPERM;
		goto mdp_hist_stop_err;
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_is_hist_start = FALSE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	ret = _mdp_histogram_ctrl(FALSE);

mdp_hist_stop_err:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}

/* call from within mdp_hist_mutex */
static int _mdp_copy_hist_data(struct mdp_histogram *hist)
{
	char *mdp_hist_base;
	uint32 r_data_offset = 0x100, g_data_offset = 0x200;
	uint32 b_data_offset = 0x300;
	int ret = 0;

	if (mdp_rev >= MDP_REV_42) {
		mdp_hist_base = MDP_BASE + 0x95000;
		r_data_offset = 0x400;
		g_data_offset = 0x800;
		b_data_offset = 0xc00;
	} else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_41) {
		mdp_hist_base = MDP_BASE + 0x95000;
	} else if (mdp_rev >= MDP_REV_30 && mdp_rev <= MDP_REV_31) {
		mdp_hist_base = MDP_BASE + 0x94000;
	} else {
		pr_err("%s(): Unsupported MDP rev %u\n", __func__, mdp_rev);
		return -EPERM;
	}

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (hist->r) {
		ret = copy_to_user(hist->r, mdp_hist_base + r_data_offset,
			hist->bin_cnt * 4);
		if (ret)
			goto hist_err;
	}
	if (hist->g) {
		ret = copy_to_user(hist->g, mdp_hist_base + g_data_offset,
			hist->bin_cnt * 4);
		if (ret)
			goto hist_err;
	}
	if (hist->b) {
		ret = copy_to_user(hist->b, mdp_hist_base + b_data_offset,
			hist->bin_cnt * 4);
		if (ret)
			goto hist_err;
	}

	if (mdp_is_hist_start == TRUE) {
		MDP_OUTP(mdp_hist_base + 0x004,
				mdp_hist_frame_cnt);
		MDP_OUTP(mdp_hist_base, 1);
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return 0;

hist_err:
	printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
	return ret;
}

static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
{
	int ret = 0;

	if (!hist->frame_cnt || (hist->bin_cnt == 0))
		return -EINVAL;

	if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
		|| (mdp_rev == MDP_REV_42 &&
				hist->bin_cnt > MDP_REV42_HIST_MAX_BIN))
		return -EINVAL;

	mutex_lock(&mdp_hist_mutex);
	if (!mdp_is_hist_data) {
		ret = -EINVAL;
		goto error;
	}

	if (!mdp_is_hist_start) {
		printk(KERN_ERR "%s histogram not started\n", __func__);
		ret = -EPERM;
		goto error;
	}

	INIT_COMPLETION(mdp_hist_comp);
	mdp_hist_frame_cnt = hist->frame_cnt;
	mutex_unlock(&mdp_hist_mutex);

	wait_for_completion_killable(&mdp_hist_comp);

	mutex_lock(&mdp_hist_mutex);
	if (mdp_is_hist_data)
		ret = _mdp_copy_hist_data(hist);
error:
	mutex_unlock(&mdp_hist_mutex);
	return ret;
}
#endif

/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */

int mdp_ppp_pipe_wait(void)
{
	int ret = 1;

	/* wait 5 seconds for the operation to complete before declaring
	 * the MDP hung */

	if (mdp_ppp_waiting == TRUE) {
		ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
								5 * HZ);

		if (!ret)
			printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
				__func__);
	}

	return ret;
}

static DEFINE_SPINLOCK(mdp_lock);
static int mdp_irq_mask;
static int mdp_irq_enabled;

/*
 * mdp_enable_irq: must not be called from an ISR
 */
void mdp_enable_irq(uint32 term)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (mdp_irq_mask & term) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask |= term;
		if (mdp_irq_mask && !mdp_irq_enabled) {
			mdp_irq_enabled = 1;
			enable_irq(mdp_irq);
		}
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}

/*
 * mdp_disable_irq: must not be called from an ISR
 */
void mdp_disable_irq(uint32 term)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (!(mdp_irq_mask & term)) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask &= ~term;
		if (!mdp_irq_mask && mdp_irq_enabled) {
			mdp_irq_enabled = 0;
			disable_irq(mdp_irq);
		}
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}

void mdp_disable_irq_nosync(uint32 term)
{
	spin_lock(&mdp_lock);
	if (!(mdp_irq_mask & term)) {
		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
				__func__, term, mdp_irq_mask, mdp_irq_enabled);
	} else {
		mdp_irq_mask &= ~term;
		if (!mdp_irq_mask && mdp_irq_enabled) {
			mdp_irq_enabled = 0;
			disable_irq_nosync(mdp_irq);
		}
	}
	spin_unlock(&mdp_lock);
}

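/*
 * mdp_pipe_kickoff() starts one MDP engine identified by 'term': it powers
 * the corresponding block on via mdp_pipe_ctrl() and then writes that
 * block's kickoff register. For PPP the call blocks until the completion
 * is signalled from the ISR.
 */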
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0); /* start DMA */
#else
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */

#ifdef CONFIG_FB_MSM_MDP303

#ifdef CONFIG_FB_MSM_MIPI_DSI
		mipi_dsi_cmd_mdp_start();
#endif

#endif

#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	} else if (term == MDP_OVERLAY2_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x00D0, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
static int mdp_clk_rate;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;

static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
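
/*
 * mdp_pipe_ctrl() reference-counts power requests per block in
 * mdp_block_power_cnt[]. The MDP clocks are enabled as soon as any block
 * is powered on and are released through a delayed work item once every
 * count has dropped back to zero.
 */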
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;
	struct msm_fb_panel_data *pdata;

	/*
	 * It is assumed that if isr == TRUE then state == MDP_BLOCK_POWER_OFF;
	 * if state were ON while in an ISR, user context could turn off the
	 * clocks while the interrupt is still updating the power state to ON.
	 */
	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		atomic_inc(&mdp_block_power_cnt[block]);

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		atomic_dec(&mdp_block_power_cnt[block]);

		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
			/*
			 * The master block always has to serve a request to
			 * power off MDP, and it also has a timer to power off.
			 * So if the timer expires first and DMA2 finishes
			 * later, the master has to power off twice. There
			 * shouldn't be multiple power-off requests for other
			 * blocks.
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			atomic_set(&mdp_block_power_cnt[block], 0);
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in an ISR, we send our request to the workqueue.
	 * Otherwise, processing happens in the current context.
	 */
	if (isr) {
		if (mdp_current_clk_on) {
			/* checking all blocks power state */
			for (i = 0; i < MDP_MAX_BLOCK; i++) {
				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
					mdp_all_blocks_off = FALSE;
					break;
				}
			}

			if (mdp_all_blocks_off) {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
				mdp_all_blocks_off = FALSE;
				break;
			}
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */
		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * Try to cancel the current work. If it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run; queue_delayed_work won't
			 * accept the next job, which is the same as
			 * queue_delayed_work(mdp_timer_duration = 0).
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			mutex_lock(&mdp_suspend_mutex);
			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
				mdp_current_clk_on = FALSE;
				mb();
				/* turn off MDP clks */
				mdp_vsync_clk_disable();
				for (i = 0; i < pdev_list_cnt; i++) {
					pdata = (struct msm_fb_panel_data *)
						pdev_list[i]->dev.platform_data;
					if (pdata && pdata->clk_func)
						pdata->clk_func(0);
				}
				if (mdp_clk != NULL) {
					mdp_clk_rate = clk_get_rate(mdp_clk);
					clk_disable(mdp_clk);
					if (mdp_hw_revision <=
						MDP4_REVISION_V2_1 &&
						mdp_clk_rate > 122880000) {
						clk_set_rate(mdp_clk,
							 122880000);
					}
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
				if (mdp_lut_clk != NULL)
					clk_disable(mdp_lut_clk);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
			mutex_unlock(&mdp_suspend_mutex);
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			for (i = 0; i < pdev_list_cnt; i++) {
				pdata = (struct msm_fb_panel_data *)
					pdev_list[i]->dev.platform_data;
				if (pdata && pdata->clk_func)
					pdata->clk_func(1);
			}
			if (mdp_clk != NULL) {
				if (mdp_hw_revision <=
					MDP4_REVISION_V2_1 &&
					mdp_clk_rate > 122880000) {
					clk_set_rate(mdp_clk,
						 mdp_clk_rate);
				}
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
			if (mdp_lut_clk != NULL)
				clk_enable(mdp_lut_clk);
			mdp_vsync_clk_enable();
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}

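/*
 * Legacy (pre-MDP4) interrupt handler: reads and clears MDP_INTR_STATUS in
 * a loop and completes the waiters for the DMA, PPP and histogram blocks
 * that signalled. MDP4 builds register mdp4_isr() instead.
 */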
#ifndef CONFIG_FB_MSM_MDP40
irqreturn_t mdp_isr(int irq, void *ptr)
{
	uint32 mdp_interrupt = 0;
	struct mdp_dma_data *dma;

	mdp_is_in_isr = TRUE;
	do {
		mdp_interrupt = inp32(MDP_INTR_STATUS);
		outp32(MDP_INTR_CLEAR, mdp_interrupt);

		mdp_interrupt &= mdp_intr_mask;

		if (mdp_interrupt & TV_ENC_UNDERRUN) {
			mdp_interrupt &= ~(TV_ENC_UNDERRUN);
			mdp_tv_underflow_cnt++;
		}

		if (!mdp_interrupt)
			break;

		/* DMA3 TV-Out Start */
		if (mdp_interrupt & TV_OUT_DMA3_START) {
			/* let's disable TV out interrupt */
			mdp_intr_mask &= ~TV_OUT_DMA3_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma3_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}
#ifndef CONFIG_FB_MSM_MDP22
		if (mdp_interrupt & MDP_HIST_DONE) {
			outp32(MDP_BASE + 0x94018, 0x3);
			outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
			complete(&mdp_hist_comp);
		}

		/* LCDC UnderFlow */
		if (mdp_interrupt & LCDC_UNDERFLOW) {
			mdp_lcdc_underflow_cnt++;
			/*
			 * When an underflow happens, the HW resets all the
			 * histogram registers that were set before, so
			 * restore them back to normal.
			 */
			MDP_OUTP(MDP_BASE + 0x94010, 1);
			MDP_OUTP(MDP_BASE + 0x9401c, 2);
			if (mdp_is_hist_start == TRUE) {
				MDP_OUTP(MDP_BASE + 0x94004,
						mdp_hist_frame_cnt);
				MDP_OUTP(MDP_BASE + 0x94000, 1);
			}
		}
		/* LCDC Frame Start */
		if (mdp_interrupt & LCDC_FRAME_START) {
			/* let's disable LCDC interrupt */
			mdp_intr_mask &= ~LCDC_FRAME_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma2_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}

		/* DMA_S LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_S_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
					TRUE);
			complete(&dma->comp);
		}
		/* DMA_E LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_E_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
					TRUE);
			complete(&dma->comp);
		}

#endif

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_P_DONE) {
			struct timeval now;

			mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
				mdp_dma2_last_update_time);
			if (mdp_debug[MDP_DMA2_BLOCK]) {
				jiffies_to_timeval(jiffies, &now);
				mdp_dma2_timeval.tv_usec =
				    now.tv_usec - mdp_dma2_timeval.tv_usec;
			}
#ifndef CONFIG_FB_MSM_MDP303
			dma = &dma2_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
					TRUE);
			complete(&dma->comp);
#else
			if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
				dma = &dma2_data;
				dma->busy = FALSE;
				mdp_pipe_ctrl(MDP_DMA2_BLOCK,
					MDP_BLOCK_POWER_OFF, TRUE);
				complete(&dma->comp);
			}
#endif
		}
		/* PPP Complete */
		if (mdp_interrupt & MDP_PPP_DONE) {
#ifdef CONFIG_FB_MSM_MDP31
			MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
#endif
			mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
			if (mdp_ppp_waiting) {
				mdp_ppp_waiting = FALSE;
				complete(&mdp_ppp_comp);
			}
		}
	} while (1);

	mdp_is_in_isr = FALSE;

	return IRQ_HANDLED;
}
#endif

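/*
 * One-time setup of the locks, workqueues and per-DMA bookkeeping used
 * above; called from mdp_driver_init() before the platform driver is
 * registered.
 */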
static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	sema_init(&mdp_ppp_mutex, 1);
	sema_init(&mdp_pipe_ctrl_mutex, 1);

	dma2_data.busy = FALSE;
	dma2_data.dmap_busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_completion(&dma2_data.dmap_comp);
	sema_init(&dma2_data.mutex, 1);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	sema_init(&dma3_data.mutex, 1);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	sema_init(&dma_s_data.mutex, 1);

#ifndef CONFIG_FB_MSM_MDP303
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);
	mutex_init(&dma_e_data.ov_mutex);
#endif
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
	dma_wb_data.busy = FALSE;
	dma_wb_data.waiting = FALSE;
	init_completion(&dma_wb_data.comp);
	mutex_init(&dma_wb_data.ov_mutex);
#endif

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		atomic_set(&mdp_block_power_cnt[i], 0);
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
#endif
			}
		}
	}
#endif
}

static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);

static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int mdp_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static struct dev_pm_ops mdp_dev_pm_ops = {
	.runtime_suspend = mdp_runtime_suspend,
	.runtime_resume = mdp_runtime_resume,
};

static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.pm = &mdp_dev_pm_ops,
	},
};

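/*
 * Panel power hooks registered in mdp_probe(): they bracket the chained
 * panel_next_on()/panel_next_off() calls with MDP power votes and
 * histogram start/stop.
 */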
static int mdp_off(struct platform_device *pdev)
{
	int ret = 0;
	mdp_histogram_ctrl(FALSE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_off(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}

static int mdp_on(struct platform_device *pdev)
{
	int ret = 0;

#ifdef CONFIG_FB_MSM_MDP40
	struct msm_fb_data_type *mfd;
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (is_mdp4_hw_reset()) {
		mfd = platform_get_drvdata(pdev);
		mdp_vsync_cfg_regs(mfd, FALSE);
		mdp4_hw_init();
		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
	}
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
	mdp_histogram_ctrl(TRUE);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	ret = panel_next_on(pdev);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return ret;
}

static int mdp_resource_initialized;
static struct msm_panel_common_pdata *mdp_pdata;

uint32 mdp_hw_revision;

/*
 * mdp_hw_revision:
 * 0 == V1
 * 1 == V2
 * 2 == V2.1
 */
void mdp_hw_version(void)
{
	char *cp;
	uint32 *hp;

	if (mdp_pdata == NULL)
		return;

	mdp_hw_revision = MDP4_REVISION_NONE;
	if (mdp_pdata->hw_revision_addr == 0)
		return;

	/* tlmmgpio2 shadow */
	cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);

	if (cp == NULL)
		return;

	hp = (uint32 *)cp;	/* HW_REVISION_NUMBER */
	mdp_hw_revision = *hp;
	iounmap(cp);

	mdp_hw_revision >>= 28;	/* bits 31:28 */
	mdp_hw_revision &= 0x0f;

	MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
				__func__, mdp_hw_revision);
}

#ifdef CONFIG_FB_MSM_MDP40
static void configure_mdp_core_clk_table(uint32 min_clk_rate)
{
	uint8 count;
	uint32 current_rate;
	if (mdp_clk && mdp_pdata && mdp_pdata->mdp_core_clk_table) {
		min_clk_rate = clk_round_rate(mdp_clk, min_clk_rate);
		if (clk_set_rate(mdp_clk, min_clk_rate) < 0)
			printk(KERN_ERR "%s: clk_set_rate failed\n",
							__func__);
		else {
			count = 0;
			current_rate = clk_get_rate(mdp_clk);
			while (count < mdp_pdata->num_mdp_clk) {
				if (mdp_pdata->mdp_core_clk_table[count]
						< current_rate) {
					mdp_pdata->
					mdp_core_clk_table[count] =
							current_rate;
				}
				count++;
			}
		}
	}
}
#endif

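/*
 * Optional bus-scaling support: mdp_bus_scale_update_request() picks a use
 * case from the platform-supplied table and forwards it to the msm_bus
 * client registered in mdp_probe().
 */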
#ifdef CONFIG_MSM_BUS_SCALING
static uint32_t mdp_bus_scale_handle;
int mdp_bus_scale_update_request(uint32_t index)
{
	if (!mdp_pdata || !mdp_pdata->mdp_bus_scale_table
	    || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1)) {
		printk(KERN_ERR "%s invalid table or index\n", __func__);
		return -EINVAL;
	}
	if (mdp_bus_scale_handle < 1) {
		printk(KERN_ERR "%s invalid bus handle\n", __func__);
		return -EINVAL;
	}
	return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
							index);
}
#endif
DEFINE_MUTEX(mdp_clk_lock);
int mdp_set_core_clk(uint16 perf_level)
{
	int ret = -EINVAL;
	if (mdp_clk && mdp_pdata
		 && mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk)
			printk(KERN_ERR "%s invalid perf level\n", __func__);
		else {
			mutex_lock(&mdp_clk_lock);
			ret = clk_set_rate(mdp_clk,
				mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
						- perf_level]);
			mutex_unlock(&mdp_clk_lock);
			if (ret) {
				printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
					__func__);
			}
		}
	}
	return ret;
}

unsigned long mdp_get_core_clk(void)
{
	unsigned long clk_rate = 0;
	if (mdp_clk) {
		mutex_lock(&mdp_clk_lock);
		clk_rate = clk_get_rate(mdp_clk);
		mutex_unlock(&mdp_clk_lock);
	}

	return clk_rate;
}

unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
{
	unsigned long clk_rate = 0;

	if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
		if (perf_level > mdp_pdata->num_mdp_clk) {
			printk(KERN_ERR "%s invalid perf level\n", __func__);
			clk_rate = mdp_get_core_clk();
		} else {
			clk_rate = mdp_pdata->
				mdp_core_clk_table[mdp_pdata->num_mdp_clk
					- perf_level];
		}
	} else
		clk_rate = mdp_get_core_clk();

	return clk_rate;
}

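/*
 * Claims the MDP interrupt, the footswitch regulator and the core,
 * interface and LUT clocks. Called once from mdp_probe() for the
 * resource device.
 */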
static int mdp_irq_clk_setup(void)
{
	int ret;

#ifdef CONFIG_FB_MSM_MDP40
	ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
#else
	ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
#endif
	if (ret) {
		printk(KERN_ERR "mdp request_irq() failed!\n");
		return ret;
	}
	disable_irq(mdp_irq);

	footswitch = regulator_get(NULL, "fs_mdp");
	if (IS_ERR(footswitch))
		footswitch = NULL;
	else {
		regulator_enable(footswitch);
		mdp_footswitch_on = 1;
	}

	mdp_clk = clk_get(NULL, "mdp_clk");
	if (IS_ERR(mdp_clk)) {
		ret = PTR_ERR(mdp_clk);
		printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
		free_irq(mdp_irq, 0);
		return ret;
	}

	mdp_pclk = clk_get(NULL, "mdp_pclk");
	if (IS_ERR(mdp_pclk))
		mdp_pclk = NULL;

	if (mdp_rev == MDP_REV_42) {
		mdp_lut_clk = clk_get(NULL, "lut_mdp");
		if (IS_ERR(mdp_lut_clk)) {
			ret = PTR_ERR(mdp_lut_clk);
			pr_err("can't get mdp_lut_clk error:%d!\n", ret);
			clk_put(mdp_clk);
			free_irq(mdp_irq, 0);
			return ret;
		}
	} else {
		mdp_lut_clk = NULL;
	}

#ifdef CONFIG_FB_MSM_MDP40
	/*
	 * mdp_clk should always be greater than mdp_pclk
	 */
	if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
		mutex_lock(&mdp_clk_lock);
		clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
		if (mdp_lut_clk != NULL)
			clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
		mutex_unlock(&mdp_clk_lock);
	}
	MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
#endif
	return 0;
}

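/*
 * mdp_probe() runs more than once: the instance with id == 0 maps the
 * register space and sets up clocks and the IRQ; subsequent instances
 * describe individual panels and register a matching "msm_fb" platform
 * device.
 */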
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif
#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
	struct mipi_panel_info *mipi;
#endif

	if ((pdev->id == 0) && (pdev->num_resources > 0)) {

		mdp_pdata = pdev->dev.platform_data;

		size = resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		mdp_irq = platform_get_irq(pdev, 0);
		if (mdp_irq < 0) {
			pr_err("mdp: can not get mdp irq\n");
			return -ENOMEM;
		}

		mdp_rev = mdp_pdata->mdp_rev;
		rc = mdp_irq_clk_setup();

		if (rc)
			return rc;

		mdp_hw_version();

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

#ifdef CONFIG_FB_MSM_OVERLAY
		mdp_hw_cursor_init();
#endif
		mdp_resource_initialized = 1;
		return 0;
	}

	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;
	mfd->mdp_rev = mdp_rev;

	mfd->ov0_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
	mfd->ov1_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
	memset((void *)mfd->ov0_wb_buf, 0, sizeof(struct mdp_buf_type));
	memset((void *)mfd->ov1_wb_buf, 0, sizeof(struct mdp_buf_type));

	if (mdp_pdata) {
		mfd->ov0_wb_buf->size = mdp_pdata->ov0_wb_size;
		mfd->ov1_wb_buf->size = mdp_pdata->ov1_wb_size;
		mfd->mem_hid = mdp_pdata->mem_hid;
	} else {
		mfd->ov0_wb_buf->size = 0;
		mfd->ov1_wb_buf->size = 0;
		mfd->mem_hid = 0;
	}

	mfd->ov0_blt_state = 0;
	mfd->use_ov0_blt = 0;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	mdp_prim_panel_type = mfd->panel.type;
	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
			mfd->dma_fnc = mdp4_mddi_overlay;
			mfd->cursor_update = mdp4_mddi_overlay_cursor;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

#ifdef CONFIG_FB_MSM_MIPI_DSI
	case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		pdata->on = mdp4_dsi_video_on;
		pdata->off = mdp4_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp4_dsi_video_overlay;
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = EXTERNAL_INTF_SEL;
			mfd->dma = &dma_e_data;
		}
		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
#else
		pdata->on = mdp_dsi_video_on;
		pdata->off = mdp_dsi_video_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dsi_video_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}

#endif
		if (mdp_rev >= MDP_REV_40)
			mfd->cursor_update = mdp_hw_cursor_sync_update;
		else
			mfd->cursor_update = mdp_hw_cursor_update;
		break;

	case MIPI_CMD_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
#ifdef CONFIG_FB_MSM_MDP40
		mipi = &mfd->panel_info.mipi;
		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
#endif
		if (mfd->panel_info.pdest == DISPLAY_1) {
			if_no = PRIMARY_INTF_SEL;
			mfd->dma = &dma2_data;
		} else {
			if_no = SECONDARY_INTF_SEL;
			mfd->dma = &dma_s_data;
		}
		mfd->lut_update = mdp_lut_update_nonlcdc;
		mfd->do_histogram = mdp_do_histogram;
		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
#else
		mfd->dma_fnc = mdp_dma2_update;
		mfd->do_histogram = mdp_do_histogram;
		if (mfd->panel_info.pdest == DISPLAY_1)
			mfd->dma = &dma2_data;
		else {
			printk(KERN_ERR "Invalid Selection of destination panel\n");
			rc = -ENODEV;
			goto mdp_probe_err;
		}
#endif
		mdp_config_vsync(mfd);
		break;
#endif

#ifdef CONFIG_FB_MSM_DTV
	case DTV_PANEL:
		pdata->on = mdp4_dtv_on;
		pdata->off = mdp4_dtv_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
		mfd->dma_fnc = mdp4_dtv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		break;
#endif
	case HDMI_PANEL:
	case LCDC_PANEL:
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
		mfd->cursor_update = mdp_hw_cursor_sync_update;
#else
		mfd->cursor_update = mdp_hw_cursor_update;
#endif
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
								* 23 / 20);
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
		pdata->on = mdp4_atv_on;
		pdata->off = mdp4_atv_off;
		mfd->dma_fnc = mdp4_atv_overlay;
		mfd->dma = &dma_e_data;
		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
#else
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
#endif
		break;

#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
	case WRITEBACK_PANEL:
		{
			unsigned int mdp_version;
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON,
						FALSE);
			mdp_version = inpdw(MDP_BASE + 0x0);
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
						FALSE);
			if (mdp_version < 0x04030303) {
				pr_err("%s: writeback panel not supported\n",
					__func__);
				rc = -ENODEV;
				goto mdp_probe_err;
			}
			pdata->on = mdp4_overlay_writeback_on;
			pdata->off = mdp4_overlay_writeback_off;
			mfd->dma_fnc = mdp4_writeback_overlay;
			mfd->dma = &dma_wb_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
		}
		break;
#endif
	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}
#ifdef CONFIG_FB_MSM_MDP40
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (!mdp_bus_scale_handle && mdp_pdata &&
		mdp_pdata->mdp_bus_scale_table) {
		mdp_bus_scale_handle =
			msm_bus_scale_register_client(
					mdp_pdata->mdp_bus_scale_table);
		if (!mdp_bus_scale_handle) {
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
			return -ENOMEM;
		}
	}
#endif

	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pdev_list[pdev_list_cnt++] = pdev;
	mdp4_extn_disp = 0;
	return 0;

mdp_probe_err:
	platform_device_put(msm_fb_dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return rc;
}

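/*
 * Gates the MDP footswitch regulator while suspended. The early
 * suspend/resume callbacks below use this to drop power on MDP 4.2+
 * targets when no external display is active.
 */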
void mdp_footswitch_ctrl(boolean on)
{
	mutex_lock(&mdp_suspend_mutex);
	if (!mdp_suspended || mdp4_extn_disp || !footswitch ||
		mdp_rev <= MDP_REV_41) {
		mutex_unlock(&mdp_suspend_mutex);
		return;
	}

	if (on && !mdp_footswitch_on) {
		pr_debug("Enable MDP FS\n");
		regulator_enable(footswitch);
		mdp_footswitch_on = 1;
	} else if (!on && mdp_footswitch_on) {
		pr_debug("Disable MDP FS\n");
		regulator_disable(footswitch);
		mdp_footswitch_on = 0;
	}

	mutex_unlock(&mdp_suspend_mutex);
}

#ifdef CONFIG_PM
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* in case the worker can't be cancelled, wait for it to finish */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (pdev->id == 0) {
		mdp_suspend_sub();
		if (mdp_current_clk_on) {
			printk(KERN_WARNING "MDP suspend failed\n");
			return -EBUSY;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_HAS_EARLYSUSPEND
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
	mdp4_dtv_set_black_screen();
#endif
	mdp_footswitch_ctrl(FALSE);
}

static void mdp_early_resume(struct early_suspend *h)
{
	mdp_footswitch_ctrl(TRUE);
	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif

static int mdp_remove(struct platform_device *pdev)
{
	if (footswitch != NULL)
		regulator_put(footswitch);
	iounmap(msm_mdp_base);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_MSM_BUS_SCALING
	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
		mdp_bus_scale_handle > 0)
		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
#endif
	return 0;
}

static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	early_suspend.resume = mdp_early_resume;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}

static int __init mdp_driver_init(void)
{
	int ret;

	mdp_drv_init();

	ret = mdp_register_driver();
	if (ret) {
		printk(KERN_ERR "mdp_register_driver() failed!\n");
		return ret;
	}

#if defined(CONFIG_DEBUG_FS)
	mdp_debugfs_init();
#endif

	return 0;
}

module_init(mdp_driver_init);