Camera Bring-up MR1.

Update the MSM camera UAPI headers for the MR1 camera stack: add
CSIC/CSID/CSIPHY configuration ioctls and data structures, AXI
init/release and OEM/native-command ioctls, sensor I2C register-setting,
voltage-regulator, clock and GPIO configuration types, a payload-carrying
v4l2_event_and_payload container for MSM_CAM_IOCTL_V4L2_EVT_NOTIFY,
additional capture modes and output types, and device-ID encoding in the
instance handle. Move the MCTL post-processing crop and frame-command
structures from msm_isp.h into msm_camera.h, drop the roll-off
calibration data from sensor_calib_data, add the msm_jpeg.h interface
header, and import a videobuf2-core snapshot under include/media.
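
A minimal userspace sketch of the reworked event-notify path follows; it
is illustration only, not part of the change. The config-node path, the
helper name and the transaction-id handling are assumptions; only the
ioctl and struct v4l2_event_and_payload come from this header.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msm_camera.h>	/* assumed install path for this header */

/* Acknowledge a v4l2 event back to the kernel together with a payload. */
static int notify_evt(int cfg_fd, const struct v4l2_event *evt,
		      void *payload, uint32_t len, uint32_t trans_id)
{
	struct v4l2_event_and_payload evt_payload;

	memset(&evt_payload, 0, sizeof(evt_payload));
	evt_payload.evt = *evt;			/* event being acknowledged */
	evt_payload.payload = payload;		/* user buffer, may be NULL */
	evt_payload.payload_length = len;	/* payload size in bytes */
	evt_payload.transaction_id = trans_id;	/* assumed request/response correlator */

	return ioctl(cfg_fd, MSM_CAM_IOCTL_V4L2_EVT_NOTIFY, &evt_payload);
}

int main(void)
{
	int fd = open("/dev/msm_camera/config0", O_RDWR);	/* hypothetical node */
	struct v4l2_event evt;

	if (fd < 0)
		return 1;
	memset(&evt, 0, sizeof(evt));
	return notify_evt(fd, &evt, NULL, 0, 0) < 0 ? 1 : 0;
}
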
Change-Id: I84d3b4d7ff4147506fdfc81cd26b0aa9cfb72467
Signed-off-by: Sudhir Sharma <sudsha@codeaurora.org>
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
old mode 100644
new mode 100755
index d1d4eaa..a94dc66
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -16,6 +16,7 @@
 #ifdef MSM_CAMERA_BIONIC
 #include <sys/types.h>
 #endif
+#include <linux/videodev2.h>
 #include <linux/types.h>
 #include <linux/ioctl.h>
 #ifdef __KERNEL__
@@ -27,12 +28,14 @@
 #include <linux/time.h>
 #endif
 
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 
 #define BIT(nr)   (1UL << (nr))
 
 #define MSM_CAM_IOCTL_MAGIC 'm'
 
+#define MAX_SERVER_PAYLOAD_LENGTH 8192
+
 #define MSM_CAM_IOCTL_GET_SENSOR_INFO \
 	_IOR(MSM_CAM_IOCTL_MAGIC, 1, struct msm_camsensor_info *)
 
@@ -151,7 +154,7 @@
 	_IOW(MSM_CAM_IOCTL_MAGIC, 39, struct msm_camera_st_frame *)
 
 #define MSM_CAM_IOCTL_V4L2_EVT_NOTIFY \
-	_IOR(MSM_CAM_IOCTL_MAGIC, 40, struct v4l2_event *)
+	_IOW(MSM_CAM_IOCTL_MAGIC, 40, struct v4l2_event_and_payload)
 
 #define MSM_CAM_IOCTL_SET_MEM_MAP_INFO \
 	_IOR(MSM_CAM_IOCTL_MAGIC, 41, struct msm_mem_map_info *)
@@ -216,6 +219,47 @@
 #define MSM_CAM_IOCTL_STATS_UNREG_BUF \
 	_IOR(MSM_CAM_IOCTL_MAGIC, 61, struct msm_stats_flush_bufq *)
 
+#define MSM_CAM_IOCTL_CSIC_IO_CFG \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 62, struct csic_cfg_data *)
+
+#define MSM_CAM_IOCTL_CSID_IO_CFG \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 63, struct csid_cfg_data *)
+
+#define MSM_CAM_IOCTL_CSIPHY_IO_CFG \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 64, struct csiphy_cfg_data *)
+
+#define MSM_CAM_IOCTL_OEM \
+	_IOW(MSM_CAM_IOCTL_MAGIC, 65, struct sensor_cfg_data *)
+
+#define MSM_CAM_IOCTL_AXI_INIT \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 66, uint8_t *)
+
+#define MSM_CAM_IOCTL_AXI_RELEASE \
+	_IO(MSM_CAM_IOCTL_MAGIC, 67)
+
+#define MSM_CAM_IOCTL_V4L2_EVT_NATIVE_CMD \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 68, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_IOCTL_V4L2_EVT_NATIVE_FRONT_CMD \
+	_IOWR(MSM_CAM_IOCTL_MAGIC, 69, struct msm_camera_v4l2_ioctl_t)
+
+#define MSM_CAM_IOCTL_INTF_MCTL_MAPPING_CFG \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 70, struct intf_mctl_mapping_cfg *)
+
+struct ioctl_native_cmd {
+	unsigned short mode;
+	unsigned short address;
+	unsigned short value_1;
+	unsigned short value_2;
+	unsigned short value_3;
+};
+
+struct v4l2_event_and_payload {
+	struct v4l2_event evt;
+	uint32_t payload_length;
+	uint32_t transaction_id;
+	void *payload;
+};
 
 struct msm_stats_reqbuf {
 	int num_buf;		/* how many buffers requested */
@@ -350,6 +394,27 @@
 	uint32_t inst_handle;
 };
 
+struct msm_pp_crop {
+	uint32_t  src_x;
+	uint32_t  src_y;
+	uint32_t  src_w;
+	uint32_t  src_h;
+	uint32_t  dst_x;
+	uint32_t  dst_y;
+	uint32_t  dst_w;
+	uint32_t  dst_h;
+	uint8_t update_flag;
+};
+
+struct msm_mctl_pp_frame_cmd {
+	uint32_t cookie;
+	uint8_t  vpe_output_action;
+	struct msm_pp_frame src_frame;
+	struct msm_pp_frame dest_frame;
+	struct msm_pp_crop crop;
+	int path;
+};
+
 struct msm_cam_evt_divert_frame {
 	unsigned short image_mode;
 	unsigned short op_mode;
@@ -438,11 +503,11 @@
 #define CMD_STATS_AF_ENABLE		13
 #define CMD_STATS_AEC_ENABLE		14
 #define CMD_STATS_AWB_ENABLE		15
-#define CMD_STATS_ENABLE  		16
+#define CMD_STATS_ENABLE		16
 
 #define CMD_STATS_AXI_CFG		17
 #define CMD_STATS_AEC_AXI_CFG		18
-#define CMD_STATS_AF_AXI_CFG 		19
+#define CMD_STATS_AF_AXI_CFG		19
 #define CMD_STATS_AWB_AXI_CFG		20
 #define CMD_STATS_RS_AXI_CFG		21
 #define CMD_STATS_CS_AXI_CFG		22
@@ -486,8 +551,8 @@
 #define CMD_STATS_BG_BUF_RELEASE 56
 #define CMD_STATS_BF_BUF_RELEASE 57
 #define CMD_STATS_BHIST_BUF_RELEASE 58
-#define CMD_VFE_SOF_COUNT_UPDATE 59
-#define CMD_VFE_COUNT_SOF_ENABLE 60
+#define CMD_VFE_PIX_SOF_COUNT_UPDATE 59
+#define CMD_VFE_COUNT_PIX_SOF_ENABLE 60
 
 #define CMD_AXI_CFG_PRIM               BIT(8)
 #define CMD_AXI_CFG_PRIM_ALL_CHNLS     BIT(9)
@@ -495,10 +560,13 @@
 #define CMD_AXI_CFG_SEC_ALL_CHNLS      BIT(11)
 #define CMD_AXI_CFG_TERT1              BIT(12)
 #define CMD_AXI_CFG_TERT2              BIT(13)
+#define CMD_AXI_CFG_TERT3              BIT(14)
 
 #define CMD_AXI_START  0xE1
 #define CMD_AXI_STOP   0xE2
 #define CMD_AXI_RESET  0xE3
+#define CMD_AXI_ABORT  0xE4
+
 
 
 #define AXI_CMD_PREVIEW      BIT(0)
@@ -583,6 +651,7 @@
 	MSM_STATS_TYPE_BF,  /* Bayer Focus */
 	MSM_STATS_TYPE_BHIST,   /* Bayer Hist */
 	MSM_STATS_TYPE_AE_AW,   /* legacy stats for vfe 2.x*/
+	MSM_STATS_TYPE_COMP, /* Composite stats */
 	MSM_STATS_TYPE_MAX  /* MAX */
 };
 
@@ -645,8 +714,7 @@
 #define OUTPUT_SEC_ALL_CHNLS     BIT(11)
 #define OUTPUT_TERT1             BIT(12)
 #define OUTPUT_TERT2             BIT(13)
-
-
+#define OUTPUT_TERT3             BIT(14)
 
 #define MSM_FRAME_PREV_1	0
 #define MSM_FRAME_PREV_2	1
@@ -662,8 +730,12 @@
 #define OUTPUT_TYPE_ST_D BIT(7)
 #define OUTPUT_TYPE_R    BIT(8)
 #define OUTPUT_TYPE_R1   BIT(9)
-
-
+#define OUTPUT_TYPE_SAEC   BIT(10)
+#define OUTPUT_TYPE_SAFC   BIT(11)
+#define OUTPUT_TYPE_SAWB   BIT(12)
+#define OUTPUT_TYPE_IHST   BIT(13)
+#define OUTPUT_TYPE_CSTA   BIT(14)
+#define OUTPUT_TYPE_R2   BIT(15)
 
 struct fd_roi_info {
 	void *info;
@@ -778,27 +850,39 @@
 /* extendedmode for the thumb nail image in VIDIOC_S_PARM */
 #define MSM_V4L2_EXT_CAPTURE_MODE_THUMBNAIL \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+4)
-#define MSM_V4L2_EXT_CAPTURE_MODE_RAW \
+/* ISP_PIX_OUTPUT1: no pp, directly send output1 buf to user */
+#define MSM_V4L2_EXT_CAPTURE_MODE_ISP_PIX_OUTPUT1 \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+5)
-#define MSM_V4L2_EXT_CAPTURE_MODE_RDI \
+/* ISP_PIX_OUTPUT2: no pp, directly send output2 buf to user */
+#define MSM_V4L2_EXT_CAPTURE_MODE_ISP_PIX_OUTPUT2 \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+6)
-#define MSM_V4L2_EXT_CAPTURE_MODE_RDI1 \
+/* raw image type */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RAW \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+7)
-#define MSM_V4L2_EXT_CAPTURE_MODE_RDI2 \
+/* RDI dump */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+8)
-#define MSM_V4L2_EXT_CAPTURE_MODE_AEC \
+/* RDI dump 1 */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI1 \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+9)
-#define MSM_V4L2_EXT_CAPTURE_MODE_AWB \
+/* RDI dump 2 */
+#define MSM_V4L2_EXT_CAPTURE_MODE_RDI2 \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+10)
-#define MSM_V4L2_EXT_CAPTURE_MODE_AF \
+#define MSM_V4L2_EXT_CAPTURE_MODE_AEC \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+11)
-#define MSM_V4L2_EXT_CAPTURE_MODE_IHIST \
+#define MSM_V4L2_EXT_CAPTURE_MODE_AWB \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+12)
-#define MSM_V4L2_EXT_CAPTURE_MODE_CS \
+#define MSM_V4L2_EXT_CAPTURE_MODE_AF \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+13)
-#define MSM_V4L2_EXT_CAPTURE_MODE_RS \
+#define MSM_V4L2_EXT_CAPTURE_MODE_IHIST \
 	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+14)
-#define MSM_V4L2_EXT_CAPTURE_MODE_MAX (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+15)
+#define MSM_V4L2_EXT_CAPTURE_MODE_CS \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+15)
+#define MSM_V4L2_EXT_CAPTURE_MODE_RS \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+16)
+#define MSM_V4L2_EXT_CAPTURE_MODE_CSTA \
+	(MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+17)
+#define MSM_V4L2_EXT_CAPTURE_MODE_MAX (MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT+18)
 
 #define MSM_V4L2_PID_MOTION_ISO              V4L2_CID_PRIVATE_BASE
 #define MSM_V4L2_PID_EFFECT                 (V4L2_CID_PRIVATE_BASE+1)
@@ -851,7 +935,8 @@
 #define MSM_V4L2_CLOSE			11
 #define MSM_V4L2_SET_CTRL_CMD	12
 #define MSM_V4L2_EVT_SUB_MASK	13
-#define MSM_V4L2_MAX			14
+#define MSM_V4L2_PRIVATE_CMD    14
+#define MSM_V4L2_MAX			15
 #define V4L2_CAMERA_EXIT		43
 
 struct crop_info {
@@ -918,7 +1003,15 @@
 #define CFG_START_STREAM              44
 #define CFG_STOP_STREAM               45
 #define CFG_GET_CSI_PARAMS            46
-#define CFG_MAX			47
+#define CFG_POWER_UP                  47
+#define CFG_POWER_DOWN                48
+#define CFG_WRITE_I2C_ARRAY           49
+#define CFG_READ_I2C_ARRAY            50
+#define CFG_PCLK_CHANGE               51
+#define CFG_CONFIG_VREG_ARRAY         52
+#define CFG_CONFIG_CLK_ARRAY          53
+#define CFG_GPIO_OP                   54
+#define CFG_MAX                       55
 
 
 #define MOVE_NEAR	0
@@ -1162,7 +1255,7 @@
 	uint16_t gb_gain;
 	uint16_t gain_adjust;
 };
-struct sensor_3d_cali_data_t{
+struct sensor_3d_cali_data_t {
 	unsigned char left_p_matrix[3][4][8];
 	unsigned char right_p_matrix[3][4][8];
 	unsigned char square_len[8];
@@ -1192,17 +1285,6 @@
 	uint8_t pict_res;
 };
 
-#define ROLLOFF_CALDATA_SIZE    (17 * 13)
-typedef struct
-{
-    unsigned short          mesh_rolloff_table_size;     // TableSize
-    uint8_t                 r_gain[ROLLOFF_CALDATA_SIZE];   // RGain
-    uint8_t                 gr_gain[ROLLOFF_CALDATA_SIZE];  // GRGain
-    uint8_t                 gb_gain[ROLLOFF_CALDATA_SIZE];  // GBGain
-    uint8_t                 b_gain[ROLLOFF_CALDATA_SIZE];   // BGain
-    uint8_t                 red_ref[17];
-} rolloff_caldata_array_type;
-
 struct sensor_calib_data {
 	/* Color Related Measurements */
 	uint16_t r_over_g;
@@ -1215,8 +1297,6 @@
 	uint16_t stroke_amt;
 	uint16_t af_pos_1m;
 	uint16_t af_pos_inf;
-	/* Lens Shading Calibration Data */
-	rolloff_caldata_array_type rolloff;
 };
 
 enum msm_sensor_resolution_t {
@@ -1246,6 +1326,33 @@
 	uint16_t num_info;
 };
 
+struct msm_sensor_exp_gain_info_t {
+	uint16_t coarse_int_time_addr;
+	uint16_t global_gain_addr;
+	uint16_t vert_offset;
+};
+
+struct msm_sensor_output_reg_addr_t {
+	uint16_t x_output;
+	uint16_t y_output;
+	uint16_t line_length_pclk;
+	uint16_t frame_length_lines;
+};
+
+struct sensor_driver_params_type {
+	struct msm_camera_i2c_reg_setting *init_settings;
+	uint16_t init_settings_size;
+	struct msm_camera_i2c_reg_setting *mode_settings;
+	uint16_t mode_settings_size;
+	struct msm_sensor_output_reg_addr_t *sensor_output_reg_addr;
+	struct msm_camera_i2c_reg_setting *start_settings;
+	struct msm_camera_i2c_reg_setting *stop_settings;
+	struct msm_camera_i2c_reg_setting *groupon_settings;
+	struct msm_camera_i2c_reg_setting *groupoff_settings;
+	struct msm_sensor_exp_gain_info_t *sensor_exp_gain_info;
+	struct msm_sensor_output_info_t *output_info;
+};
+
 struct mirror_flip {
 	int32_t x_mirror;
 	int32_t y_flip;
@@ -1268,11 +1375,82 @@
 };
 
 struct csi_lane_params_t {
-	uint8_t csi_lane_assign;
+	uint16_t csi_lane_assign;
 	uint8_t csi_lane_mask;
 	uint8_t csi_if;
-	uint8_t csid_core;
-	uint32_t csid_version;
+	uint8_t csid_core[2];
+	uint8_t csi_phy_sel;
+};
+
+struct msm_camera_csid_lut_params {
+	uint8_t num_cid;
+	struct msm_camera_csid_vc_cfg *vc_cfg;
+};
+
+struct msm_camera_csid_params {
+	uint8_t lane_cnt;
+	uint16_t lane_assign;
+	uint8_t phy_sel;
+	struct msm_camera_csid_lut_params lut_params;
+};
+
+struct msm_camera_csiphy_params {
+	uint8_t lane_cnt;
+	uint8_t settle_cnt;
+	uint16_t lane_mask;
+	uint8_t combo_mode;
+};
+
+struct msm_camera_csi2_params {
+	struct msm_camera_csid_params csid_params;
+	struct msm_camera_csiphy_params csiphy_params;
+};
+
+enum msm_camera_csi_data_format {
+	CSI_8BIT,
+	CSI_10BIT,
+	CSI_12BIT,
+};
+
+struct msm_camera_csi_params {
+	enum msm_camera_csi_data_format data_format;
+	uint8_t lane_cnt;
+	uint8_t lane_assign;
+	uint8_t settle_cnt;
+	uint8_t dpcm_scheme;
+};
+
+enum csic_cfg_type_t {
+	CSIC_INIT,
+	CSIC_CFG,
+};
+
+struct csic_cfg_data {
+	enum csic_cfg_type_t cfgtype;
+	struct msm_camera_csi_params *csic_params;
+};
+
+enum csid_cfg_type_t {
+	CSID_INIT,
+	CSID_CFG,
+};
+
+struct csid_cfg_data {
+	enum csid_cfg_type_t cfgtype;
+	union {
+		uint32_t csid_version;
+		struct msm_camera_csid_params *csid_params;
+	} cfg;
+};
+
+enum csiphy_cfg_type_t {
+	CSIPHY_INIT,
+	CSIPHY_CFG,
+};
+
+struct csiphy_cfg_data {
+	enum csiphy_cfg_type_t cfgtype;
+	struct msm_camera_csiphy_params *csiphy_params;
 };
 
 #define CSI_EMBED_DATA 0x12
@@ -1281,6 +1459,9 @@
 #define CSI_RAW8    0x2A
 #define CSI_RAW10   0x2B
 #define CSI_RAW12   0x2C
+#define CSI_YUV420_Y_8 0x30
+#define CSI_YUV420_UV_8 0x31
+#define CSI_YUV420_JM_8 0x32
 
 #define CSI_DECODE_6BIT 0
 #define CSI_DECODE_8BIT 1
@@ -1372,6 +1553,81 @@
 	} cfg;
 };
 
+enum msm_camera_i2c_reg_addr_type {
+	MSM_CAMERA_I2C_BYTE_ADDR = 1,
+	MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+struct msm_camera_i2c_reg_array {
+	uint16_t reg_addr;
+	uint16_t reg_data;
+};
+
+enum msm_camera_i2c_data_type {
+	MSM_CAMERA_I2C_BYTE_DATA = 1,
+	MSM_CAMERA_I2C_WORD_DATA,
+	MSM_CAMERA_I2C_SET_BYTE_MASK,
+	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+	MSM_CAMERA_I2C_SET_WORD_MASK,
+	MSM_CAMERA_I2C_UNSET_WORD_MASK,
+	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+};
+
+struct msm_camera_i2c_reg_setting {
+	struct msm_camera_i2c_reg_array *reg_setting;
+	uint16_t size;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t delay;
+};
+
+enum oem_setting_type {
+	I2C_READ = 1,
+	I2C_WRITE,
+	GPIO_OP,
+	EEPROM_READ,
+	VREG_SET,
+	CLK_SET,
+};
+
+struct sensor_oem_setting {
+	enum oem_setting_type type;
+	void *data;
+};
+
+enum camera_vreg_type {
+	REG_LDO,
+	REG_VS,
+	REG_GPIO,
+};
+
+struct camera_vreg_t {
+	const char *reg_name;
+	enum camera_vreg_type type;
+	int min_voltage;
+	int max_voltage;
+	int op_mode;
+	uint32_t delay;
+};
+
+struct msm_camera_vreg_setting {
+	struct camera_vreg_t *cam_vreg;
+	uint16_t num_vreg;
+	uint8_t enable;
+};
+
+struct msm_cam_clk_info {
+	const char *clk_name;
+	long clk_rate;
+	uint32_t delay;
+};
+
+struct msm_cam_clk_setting {
+	struct msm_cam_clk_info *clk_info;
+	uint16_t num_clk_info;
+	uint8_t enable;
+};
+
 struct sensor_cfg_data {
 	int cfgtype;
 	int mode;
@@ -1408,12 +1664,30 @@
 		int ae_mode;
 		uint8_t wb_val;
 		int8_t exp_compensation;
+		uint32_t pclk;
 		struct cord aec_cord;
 		int is_autoflash;
 		struct mirror_flip mirror_flip;
+		void *setting;
 	} cfg;
 };
 
+enum gpio_operation_type {
+	GPIO_REQUEST,
+	GPIO_FREE,
+	GPIO_SET_DIRECTION_OUTPUT,
+	GPIO_SET_DIRECTION_INPUT,
+	GPIO_GET_VALUE,
+	GPIO_SET_VALUE,
+};
+
+struct msm_cam_gpio_operation {
+	enum gpio_operation_type op_type;
+	unsigned address;
+	int value;
+	const char *tag;
+};
+
 struct damping_params_t {
 	uint32_t damping_step;
 	uint32_t damping_delay;
@@ -1570,11 +1844,17 @@
 	struct pixel_t video_coord[128];
 };
 
+struct msm_calib_raw {
+	uint8_t *data;
+	uint32_t size;
+};
+
 struct msm_camera_eeprom_info_t {
 	struct msm_eeprom_support af;
 	struct msm_eeprom_support wb;
 	struct msm_eeprom_support lsc;
 	struct msm_eeprom_support dpc;
+	struct msm_eeprom_support raw;
 };
 
 struct msm_eeprom_cfg_data {
@@ -1747,6 +2027,9 @@
 #define MSM_CAM_V4L2_IOCTL_PRIVATE_G_CTRL \
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct msm_camera_v4l2_ioctl_t)
 
+#define MSM_CAM_V4L2_IOCTL_PRIVATE_GENERAL \
+	_IOW('V', BASE_VIDIOC_PRIVATE + 10, struct msm_camera_v4l2_ioctl_t)
+
 #define VIDIOC_MSM_VPE_INIT \
 	_IO('V', BASE_VIDIOC_PRIVATE + 15)
 
@@ -1757,7 +2040,7 @@
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 17, struct msm_mctl_pp_params *)
 
 #define VIDIOC_MSM_AXI_INIT \
-	_IO('V', BASE_VIDIOC_PRIVATE + 18)
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 18, uint8_t *)
 
 #define VIDIOC_MSM_AXI_RELEASE \
 	_IO('V', BASE_VIDIOC_PRIVATE + 19)
@@ -1771,22 +2054,27 @@
 #define VIDIOC_MSM_AXI_BUF_CFG \
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 22, void *)
 
+#define VIDIOC_MSM_AXI_RDI_COUNT_UPDATE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct rdi_count_msg)
+
 #define VIDIOC_MSM_VFE_INIT \
-	_IO('V', BASE_VIDIOC_PRIVATE + 22)
+	_IO('V', BASE_VIDIOC_PRIVATE + 24)
 
 #define VIDIOC_MSM_VFE_RELEASE \
-	_IO('V', BASE_VIDIOC_PRIVATE + 23)
+	_IO('V', BASE_VIDIOC_PRIVATE + 25)
 
 struct msm_camera_v4l2_ioctl_t {
 	uint32_t id;
-	void __user *ioctl_ptr;
 	uint32_t len;
+	uint32_t trans_code;
+	void __user *ioctl_ptr;
 };
 
 struct msm_camera_vfe_params_t {
 	uint32_t operation_mode;
 	uint32_t capture_count;
-	uint32_t skip_abort;
+	uint8_t  skip_reset;
+	uint8_t  stop_immediately;
 	uint16_t port_info;
 	uint32_t inst_handle;
 	uint16_t cmd_type;
@@ -1931,6 +2219,13 @@
 	uint32_t rev;
 };
 
+struct intf_mctl_mapping_cfg {
+	int is_bayer_sensor;
+	int vnode_id;
+	int num_entries;
+	uint32_t image_modes[MSM_V4L2_EXT_CAPTURE_MODE_MAX];
+};
+
 #define VIDIOC_MSM_CPP_CFG \
 	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
 
@@ -1950,7 +2245,8 @@
  *      ------------------------------------
  *      Bits    :  Purpose
  *      ------------------------------------
- *      31 - 24 :  Reserved.
+ *      31      :  is Dev ID valid?
+ *      30 - 24 :  Dev ID.
  *      23      :  is Image mode valid?
  *      22 - 16 :  Image mode.
  *      15      :  is MCTL PP inst idx valid?
@@ -1958,6 +2254,12 @@
  *      7       :  is Video inst idx valid?
  *      6 - 0   :  Video inst idx.
  */
+#define CLR_DEVID_MODE(handle)	(handle &= 0x00FFFFFF)
+#define SET_DEVID_MODE(handle, data)	\
+	(handle |= ((0x1 << 31) | ((data & 0x7F) << 24)))
+#define GET_DEVID_MODE(handle)	\
+	((handle & 0x80000000) ? ((handle & 0x7F000000) >> 24) : 0xFF)
+
 #define CLR_IMG_MODE(handle)	(handle &= 0xFF00FFFF)
 #define SET_IMG_MODE(handle, data)	\
 	(handle |= ((0x1 << 23) | ((data & 0x7F) << 16)))
diff --git a/include/media/msm_gemini.h b/include/media/msm_gemini.h
old mode 100644
new mode 100755
diff --git a/include/media/msm_gestures.h b/include/media/msm_gestures.h
old mode 100644
new mode 100755
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
old mode 100644
new mode 100755
index 9fa5932..bcd670d
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -68,6 +68,10 @@
 #define MSG_ID_RDI0_UPDATE_ACK          49
 #define MSG_ID_RDI1_UPDATE_ACK          50
 #define MSG_ID_RDI2_UPDATE_ACK          51
+#define MSG_ID_PIX0_UPDATE_ACK          52
+#define MSG_ID_PREV_STOP_ACK            53
+#define MSG_ID_OUTPUT_TERTIARY3         54
+
 
 /* ISP command IDs */
 #define VFE_CMD_DUMMY_0                                 0
@@ -324,30 +328,10 @@
 struct msm_vpe_clock_rate {
 	uint32_t rate;
 };
-struct msm_pp_crop {
-	uint32_t  src_x;
-	uint32_t  src_y;
-	uint32_t  src_w;
-	uint32_t  src_h;
-	uint32_t  dst_x;
-	uint32_t  dst_y;
-	uint32_t  dst_w;
-	uint32_t  dst_h;
-	uint8_t update_flag;
-};
+
 #define MSM_MCTL_PP_VPE_FRAME_ACK    (1<<0)
 #define MSM_MCTL_PP_VPE_FRAME_TO_APP (1<<1)
 
-struct msm_mctl_pp_frame_cmd {
-	uint32_t cookie;
-	uint8_t  vpe_output_action;
-	uint32_t src_buf_handle;
-	uint32_t dest_buf_handle;
-	struct msm_pp_crop crop;
-	int path;
-	/* TBD: 3D related */
-};
-
 #define VFE_OUTPUTS_MAIN_AND_PREVIEW    BIT(0)
 #define VFE_OUTPUTS_MAIN_AND_VIDEO      BIT(1)
 #define VFE_OUTPUTS_MAIN_AND_THUMB      BIT(2)
@@ -361,6 +345,7 @@
 #define VFE_OUTPUTS_THUMB_AND_JPEG      BIT(10)
 #define VFE_OUTPUTS_RDI0                BIT(11)
 #define VFE_OUTPUTS_RDI1                BIT(12)
+#define VFE_OUTPUTS_RDI2                BIT(13)
 
 struct msm_frame_info {
 	uint32_t inst_handle;
diff --git a/include/media/msm_jpeg.h b/include/media/msm_jpeg.h
new file mode 100755
index 0000000..11c3247
--- /dev/null
+++ b/include/media/msm_jpeg.h
@@ -0,0 +1,119 @@
+#ifndef __LINUX_MSM_JPEG_H
+#define __LINUX_MSM_JPEG_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define OUTPUT_H2V1  0
+#define OUTPUT_H2V2  1
+#define OUTPUT_BYTE  6
+
+#define MSM_JPEG_IOCTL_MAGIC 'g'
+
+#define MSM_JPEG_IOCTL_GET_HW_VERSION \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 1, struct msm_jpeg_hw_cmd *)
+
+#define MSM_JPEG_IOCTL_RESET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 2, struct msm_jpeg_ctrl_cmd *)
+
+#define MSM_JPEG_IOCTL_STOP \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 3, struct msm_jpeg_hw_cmds *)
+
+#define MSM_JPEG_IOCTL_START \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 4, struct msm_jpeg_hw_cmds *)
+
+#define MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 5, struct msm_jpeg_buf *)
+
+#define MSM_JPEG_IOCTL_INPUT_GET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 6, struct msm_jpeg_buf *)
+
+#define MSM_JPEG_IOCTL_INPUT_GET_UNBLOCK \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 7, int)
+
+#define MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 8, struct msm_jpeg_buf *)
+
+#define MSM_JPEG_IOCTL_OUTPUT_GET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 9, struct msm_jpeg_buf *)
+
+#define MSM_JPEG_IOCTL_OUTPUT_GET_UNBLOCK \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 10, int)
+
+#define MSM_JPEG_IOCTL_EVT_GET \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 11, struct msm_jpeg_ctrl_cmd *)
+
+#define MSM_JPEG_IOCTL_EVT_GET_UNBLOCK \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 12, int)
+
+#define MSM_JPEG_IOCTL_HW_CMD \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 13, struct msm_jpeg_hw_cmd *)
+
+#define MSM_JPEG_IOCTL_HW_CMDS \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 14, struct msm_jpeg_hw_cmds *)
+
+#define MSM_JPEG_IOCTL_TEST_DUMP_REGION \
+	_IOW(MSM_JPEG_IOCTL_MAGIC, 15, unsigned long)
+
+#define MSM_JPEG_MODE_REALTIME_ENCODE 0
+#define MSM_JPEG_MODE_OFFLINE_ENCODE 1
+#define MSM_JPEG_MODE_REALTIME_ROTATION 2
+#define MSM_JPEG_MODE_OFFLINE_ROTATION 3
+
+struct msm_jpeg_ctrl_cmd {
+	uint32_t type;
+	uint32_t len;
+	void     *value;
+};
+
+#define MSM_JPEG_EVT_RESET 0
+#define MSM_JPEG_EVT_SESSION_DONE	1
+#define MSM_JPEG_EVT_ERR 2
+
+struct msm_jpeg_buf {
+	uint32_t type;
+	int      fd;
+
+	void     *vaddr;
+
+	uint32_t y_off;
+	uint32_t y_len;
+	uint32_t framedone_len;
+
+	uint32_t cbcr_off;
+	uint32_t cbcr_len;
+
+	uint32_t num_of_mcu_rows;
+	uint32_t offset;
+};
+
+#define MSM_JPEG_HW_CMD_TYPE_READ      0
+#define MSM_JPEG_HW_CMD_TYPE_WRITE     1
+#define MSM_JPEG_HW_CMD_TYPE_WRITE_OR  2
+#define MSM_JPEG_HW_CMD_TYPE_UWAIT     3
+#define MSM_JPEG_HW_CMD_TYPE_MWAIT     4
+#define MSM_JPEG_HW_CMD_TYPE_MDELAY    5
+#define MSM_JPEG_HW_CMD_TYPE_UDELAY    6
+struct msm_jpeg_hw_cmd {
+
+	uint32_t type:4;
+
+	/* n microseconds of timeout for WAIT */
+	/* n microseconds of time for DELAY */
+	/* repeat n times for READ/WRITE */
+	/* max is 0xFFF, 4095 */
+	uint32_t n:12;
+	uint32_t offset:16;
+	uint32_t mask;
+	union {
+		uint32_t data;   /* for single READ/WRITE/WAIT, n = 1 */
+		uint32_t *pdata;   /* for multiple READ/WRITE/WAIT, n > 1 */
+	};
+};
+
+struct msm_jpeg_hw_cmds {
+	uint32_t m; /* number of elements in the hw_cmd array */
+	struct msm_jpeg_hw_cmd hw_cmd[1];
+};
+
+#endif /* __LINUX_MSM_JPEG_H */
diff --git a/include/media/msm_mercury.h b/include/media/msm_mercury.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-int-device.h b/include/media/v4l2-int-device.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
old mode 100644
new mode 100755
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf-msm-mem.h b/include/media/videobuf-msm-mem.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf2-core.c b/include/media/videobuf2-core.c
new file mode 100755
index 0000000..a7d13a8
--- /dev/null
+++ b/include/media/videobuf2-core.c
@@ -0,0 +1,2114 @@
+/*
+ * videobuf2-core.c - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *	   Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <media/videobuf2-core.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...)					\
+	do {								\
+		if (debug >= level)					\
+			printk(KERN_DEBUG "vb2: " fmt, ## arg);		\
+	} while (0)
+
+#define call_memop(q, op, args...)					\
+	(((q)->mem_ops->op) ?						\
+		((q)->mem_ops->op(args)) : 0)
+
+#define call_qop(q, op, args...)					\
+	(((q)->ops->op) ? ((q)->ops->op(args)) : 0)
+
+#define V4L2_BUFFER_STATE_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
+				 V4L2_BUF_FLAG_PREPARED)
+
+/**
+ * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
+ */
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	void *mem_priv;
+	int plane;
+
+	/* Allocate memory for all planes in this buffer */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
+				      q->plane_sizes[plane]);
+		if (IS_ERR_OR_NULL(mem_priv))
+			goto free;
+
+		/* Associate allocator private data with this plane */
+		vb->planes[plane].mem_priv = mem_priv;
+		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+	}
+
+	return 0;
+free:
+	/* Free already allocated memory if one of the allocations failed */
+	for (; plane > 0; --plane) {
+		call_memop(q, put, vb->planes[plane - 1].mem_priv);
+		vb->planes[plane - 1].mem_priv = NULL;
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * __vb2_buf_mem_free() - free memory of the given buffer
+ */
+static void __vb2_buf_mem_free(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		call_memop(q, put, vb->planes[plane].mem_priv);
+		vb->planes[plane].mem_priv = NULL;
+		dprintk(3, "Freed plane %d of buffer %d\n", plane,
+			vb->v4l2_buf.index);
+	}
+}
+
+/**
+ * __vb2_buf_userptr_put() - release userspace memory associated with
+ * a USERPTR buffer
+ */
+static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		if (vb->planes[plane].mem_priv)
+			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+		vb->planes[plane].mem_priv = NULL;
+	}
+}
+
+/**
+ * __setup_offsets() - setup unique offsets ("cookies") for every plane in
+ * every buffer on the queue
+ */
+static void __setup_offsets(struct vb2_queue *q, unsigned int n)
+{
+	unsigned int buffer, plane;
+	struct vb2_buffer *vb;
+	unsigned long off;
+
+	if (q->num_buffers) {
+		struct v4l2_plane *p;
+		vb = q->bufs[q->num_buffers - 1];
+		p = &vb->v4l2_planes[vb->num_planes - 1];
+		off = PAGE_ALIGN(p->m.mem_offset + p->length);
+	} else {
+		off = 0;
+	}
+
+	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
+		vb = q->bufs[buffer];
+		if (!vb)
+			continue;
+
+		for (plane = 0; plane < vb->num_planes; ++plane) {
+			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+			vb->v4l2_planes[plane].m.mem_offset = off;
+
+			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
+					buffer, plane, off);
+
+			off += vb->v4l2_planes[plane].length;
+			off = PAGE_ALIGN(off);
+		}
+	}
+}
+
+/**
+ * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
+ * video buffer memory for all buffers/planes on the queue and initialize the
+ * queue
+ *
+ * Returns the number of buffers successfully allocated.
+ */
+static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
+			     unsigned int num_buffers, unsigned int num_planes)
+{
+	unsigned int buffer;
+	struct vb2_buffer *vb;
+	int ret;
+
+	for (buffer = 0; buffer < num_buffers; ++buffer) {
+		/* Allocate videobuf buffer structures */
+		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+		if (!vb) {
+			dprintk(1, "Memory alloc for buffer struct failed\n");
+			break;
+		}
+
+		/* Length stores number of planes for multiplanar buffers */
+		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
+			vb->v4l2_buf.length = num_planes;
+
+		vb->state = VB2_BUF_STATE_DEQUEUED;
+		vb->vb2_queue = q;
+		vb->num_planes = num_planes;
+		vb->v4l2_buf.index = q->num_buffers + buffer;
+		vb->v4l2_buf.type = q->type;
+		vb->v4l2_buf.memory = memory;
+
+		/* Allocate video buffer memory for the MMAP type */
+		if (memory == V4L2_MEMORY_MMAP) {
+			ret = __vb2_buf_mem_alloc(vb);
+			if (ret) {
+				dprintk(1, "Failed allocating memory for "
+						"buffer %d\n", buffer);
+				kfree(vb);
+				break;
+			}
+			/*
+			 * Call the driver-provided buffer initialization
+			 * callback, if given. An error in initialization
+			 * results in queue setup failure.
+			 */
+			ret = call_qop(q, buf_init, vb);
+			if (ret) {
+				dprintk(1, "Buffer %d %p initialization"
+					" failed\n", buffer, vb);
+				__vb2_buf_mem_free(vb);
+				kfree(vb);
+				break;
+			}
+		}
+
+		q->bufs[q->num_buffers + buffer] = vb;
+	}
+
+	__setup_offsets(q, buffer);
+
+	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
+			buffer, num_planes);
+
+	return buffer;
+}
+
+/**
+ * __vb2_free_mem() - release all video buffer memory for a given queue
+ */
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
+{
+	unsigned int buffer;
+	struct vb2_buffer *vb;
+
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		vb = q->bufs[buffer];
+		if (!vb)
+			continue;
+
+		/* Free MMAP buffers or release USERPTR buffers */
+		if (q->memory == V4L2_MEMORY_MMAP)
+			__vb2_buf_mem_free(vb);
+		else
+			__vb2_buf_userptr_put(vb);
+	}
+}
+
+/**
+ * __vb2_queue_free() - free buffers at the end of the queue - video memory and
+ * related information, if no buffers are left return the queue to an
+ * uninitialized state. Might be called even if the queue has already been freed.
+ */
+static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+{
+	unsigned int buffer;
+
+	/* Call driver-provided cleanup function for each buffer, if provided */
+	if (q->ops->buf_cleanup) {
+		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+		     ++buffer) {
+			if (NULL == q->bufs[buffer])
+				continue;
+			q->ops->buf_cleanup(q->bufs[buffer]);
+		}
+	}
+
+	/* Release video buffer memory */
+	__vb2_free_mem(q, buffers);
+
+	/* Free videobuf buffers */
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		kfree(q->bufs[buffer]);
+		q->bufs[buffer] = NULL;
+	}
+
+	q->num_buffers -= buffers;
+	if (!q->num_buffers)
+		q->memory = 0;
+	INIT_LIST_HEAD(&q->queued_list);
+}
+
+/**
+ * __verify_planes_array() - verify that the planes array passed in struct
+ * v4l2_buffer from userspace can be safely used
+ */
+static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	/* Is memory for copying plane information present? */
+	if (NULL == b->m.planes) {
+		dprintk(1, "Multi-planar buffer passed but "
+			   "planes array not provided\n");
+		return -EINVAL;
+	}
+
+	if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
+		dprintk(1, "Incorrect planes array length, "
+			   "expected %d, got %d\n", vb->num_planes, b->length);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * __buffer_in_use() - return true if the buffer is in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+	unsigned int plane;
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		void *mem_priv = vb->planes[plane].mem_priv;
+		/*
+		 * If num_users() has not been provided, call_memop
+		 * will return 0, apparently nobody cares about this
+		 * case anyway. If num_users() returns more than 1,
+		 * we are not the only user of the plane's memory.
+		 */
+		if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+	unsigned int buffer;
+	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+		if (__buffer_in_use(q, q->bufs[buffer]))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
+ * returned to userspace
+ */
+static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	int ret;
+
+	/* Copy back data such as timestamp, flags, input, etc. */
+	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
+	b->input = vb->v4l2_buf.input;
+	b->reserved = vb->v4l2_buf.reserved;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
+		ret = __verify_planes_array(vb, b);
+		if (ret)
+			return ret;
+
+		/*
+		 * Fill in plane-related data if userspace provided an array
+		 * for it. The memory and size is verified above.
+		 */
+		memcpy(b->m.planes, vb->v4l2_planes,
+			b->length * sizeof(struct v4l2_plane));
+	} else {
+		/*
+		 * We use length and offset in v4l2_planes array even for
+		 * single-planar buffers, but userspace does not.
+		 */
+		b->length = vb->v4l2_planes[0].length;
+		b->bytesused = vb->v4l2_planes[0].bytesused;
+		if (q->memory == V4L2_MEMORY_MMAP)
+			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
+		else if (q->memory == V4L2_MEMORY_USERPTR)
+			b->m.userptr = vb->v4l2_planes[0].m.userptr;
+	}
+
+	/*
+	 * Clear any buffer state related flags.
+	 */
+	b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
+
+	switch (vb->state) {
+	case VB2_BUF_STATE_QUEUED:
+	case VB2_BUF_STATE_ACTIVE:
+		b->flags |= V4L2_BUF_FLAG_QUEUED;
+		break;
+	case VB2_BUF_STATE_ERROR:
+		b->flags |= V4L2_BUF_FLAG_ERROR;
+		/* fall through */
+	case VB2_BUF_STATE_DONE:
+		b->flags |= V4L2_BUF_FLAG_DONE;
+		break;
+	case VB2_BUF_STATE_PREPARED:
+		b->flags |= V4L2_BUF_FLAG_PREPARED;
+		break;
+	case VB2_BUF_STATE_DEQUEUED:
+		/* nothing */
+		break;
+	}
+
+	if (__buffer_in_use(q, vb))
+		b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+	return 0;
+}
+
+/**
+ * vb2_querybuf() - query video buffer information
+ * @q:		videobuf queue
+ * @b:		buffer struct passed from userspace to vidioc_querybuf handler
+ *		in driver
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * This function will verify the passed v4l2_buffer structure and fill the
+ * relevant information for the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_querybuf handler in driver.
+ */
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	struct vb2_buffer *vb;
+
+	if (b->type != q->type) {
+		dprintk(1, "querybuf: wrong buffer type\n");
+		return -EINVAL;
+	}
+
+	if (b->index >= q->num_buffers) {
+		dprintk(1, "querybuf: buffer index out of range\n");
+		return -EINVAL;
+	}
+	vb = q->bufs[b->index];
+
+	return __fill_v4l2_buffer(vb, b);
+}
+EXPORT_SYMBOL(vb2_querybuf);
+
+/**
+ * __verify_userptr_ops() - verify that all memory operations required for
+ * USERPTR queue type have been provided
+ */
+static int __verify_userptr_ops(struct vb2_queue *q)
+{
+	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
+	    !q->mem_ops->put_userptr)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * __verify_mmap_ops() - verify that all memory operations required for
+ * MMAP queue type have been provided
+ */
+static int __verify_mmap_ops(struct vb2_queue *q)
+{
+	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
+	    !q->mem_ops->put || !q->mem_ops->mmap)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * vb2_reqbufs() - Initiate streaming
+ * @q:		videobuf2 queue
+ * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
+ *
+ * Should be called from vidioc_reqbufs ioctl handler of a driver.
+ * This function:
+ * 1) verifies streaming parameters passed from the userspace,
+ * 2) sets up the queue,
+ * 3) negotiates number of buffers and planes per buffer with the driver
+ *    to be used during streaming,
+ * 4) allocates internal buffer structures (struct vb2_buffer), according to
+ *    the agreed parameters,
+ * 5) for MMAP memory type, allocates actual video memory, using the
+ *    memory handling/allocation routines provided during queue initialization
+ *
+ * If req->count is 0, all the memory will be freed instead.
+ * If the queue has been allocated previously (by a previous vb2_reqbufs() call)
+ * and the queue is not busy, memory will be reallocated.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_reqbufs handler in driver.
+ */
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+	unsigned int num_buffers, allocated_buffers, num_planes = 0;
+	int ret = 0;
+
+	if (q->fileio) {
+		dprintk(1, "reqbufs: file io in progress\n");
+		return -EBUSY;
+	}
+
+	if (req->memory != V4L2_MEMORY_MMAP
+			&& req->memory != V4L2_MEMORY_USERPTR) {
+		dprintk(1, "reqbufs: unsupported memory type\n");
+		return -EINVAL;
+	}
+
+	if (req->type != q->type) {
+		dprintk(1, "reqbufs: requested type is incorrect\n");
+		return -EINVAL;
+	}
+
+	if (q->streaming) {
+		dprintk(1, "reqbufs: streaming active\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Make sure all the required memory ops for given memory type
+	 * are available.
+	 */
+	if (req->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+		dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
+		return -EINVAL;
+	}
+
+	if (req->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+		dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If the same number of buffers and memory access method is requested
+	 * then return immediately.
+	 */
+	if (q->memory == req->memory && req->count == q->num_buffers)
+		return 0;
+
+	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
+		/*
+		 * We already have buffers allocated, so first check if they
+		 * are not in use and can be freed.
+		 */
+		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
+			dprintk(1, "reqbufs: memory in use, cannot free\n");
+			return -EBUSY;
+		}
+
+		__vb2_queue_free(q, q->num_buffers);
+
+		/*
+		 * In case of REQBUFS(0) return immediately without calling
+		 * driver's queue_setup() callback and allocating resources.
+		 */
+		if (req->count == 0)
+			return 0;
+	}
+
+	/*
+	 * Make sure the requested values and current defaults are sane.
+	 */
+	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
+	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
+	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+	q->memory = req->memory;
+
+	/*
+	 * Ask the driver how many buffers and planes per buffer it requires.
+	 * Driver also sets the size and allocator context for each plane.
+	 */
+	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
+		       q->plane_sizes, q->alloc_ctx);
+	if (ret)
+		return ret;
+
+	/* Finally, allocate buffers and video memory */
+	ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
+	if (ret == 0) {
+		dprintk(1, "Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	allocated_buffers = ret;
+
+	/*
+	 * Check if driver can handle the allocated number of buffers.
+	 */
+	if (allocated_buffers < num_buffers) {
+		num_buffers = allocated_buffers;
+
+		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
+			       &num_planes, q->plane_sizes, q->alloc_ctx);
+
+		if (!ret && allocated_buffers < num_buffers)
+			ret = -ENOMEM;
+
+		/*
+		 * Either the driver has accepted a smaller number of buffers,
+		 * or .queue_setup() returned an error
+		 */
+	}
+
+	q->num_buffers = allocated_buffers;
+
+	if (ret < 0) {
+		__vb2_queue_free(q, allocated_buffers);
+		return ret;
+	}
+
+	/*
+	 * Return the number of successfully allocated buffers
+	 * to the userspace.
+	 */
+	req->count = allocated_buffers;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_reqbufs);
+
+/**
+ * vb2_create_bufs() - Allocate buffers and any required auxiliary structs
+ * @q:		videobuf2 queue
+ * @create:	creation parameters, passed from userspace to vidioc_create_bufs
+ *		handler in driver
+ *
+ * Should be called from vidioc_create_bufs ioctl handler of a driver.
+ * This function:
+ * 1) verifies parameter sanity
+ * 2) calls the .queue_setup() queue operation
+ * 3) performs any necessary memory allocations
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_create_bufs handler in driver.
+ */
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+	unsigned int num_planes = 0, num_buffers, allocated_buffers;
+	int ret = 0;
+
+	if (q->fileio) {
+		dprintk(1, "%s(): file io in progress\n", __func__);
+		return -EBUSY;
+	}
+
+	if (create->memory != V4L2_MEMORY_MMAP
+			&& create->memory != V4L2_MEMORY_USERPTR) {
+		dprintk(1, "%s(): unsupported memory type\n", __func__);
+		return -EINVAL;
+	}
+
+	if (create->format.type != q->type) {
+		dprintk(1, "%s(): requested type is incorrect\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure all the required memory ops for given memory type
+	 * are available.
+	 */
+	if (create->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+		dprintk(1, "%s(): MMAP for current setup unsupported\n", __func__);
+		return -EINVAL;
+	}
+
+	if (create->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+		dprintk(1, "%s(): USERPTR for current setup unsupported\n", __func__);
+		return -EINVAL;
+	}
+
+	if (q->num_buffers == VIDEO_MAX_FRAME) {
+		dprintk(1, "%s(): maximum number of buffers already allocated\n",
+			__func__);
+		return -ENOBUFS;
+	}
+
+	create->index = q->num_buffers;
+
+	if (!q->num_buffers) {
+		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
+		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+		q->memory = create->memory;
+	}
+
+	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
+
+	/*
+	 * Ask the driver, whether the requested number of buffers, planes per
+	 * buffer and their sizes are acceptable
+	 */
+	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+		       &num_planes, q->plane_sizes, q->alloc_ctx);
+	if (ret)
+		return ret;
+
+	/* Finally, allocate buffers and video memory */
+	ret = __vb2_queue_alloc(q, create->memory, num_buffers,
+				num_planes);
+	if (ret < 0) {
+		dprintk(1, "Memory allocation failed with error: %d\n", ret);
+		return ret;
+	}
+
+	allocated_buffers = ret;
+
+	/*
+	 * Check if driver can handle the so far allocated number of buffers.
+	 */
+	if (ret < num_buffers) {
+		num_buffers = ret;
+
+		/*
+		 * q->num_buffers contains the total number of buffers that the
+		 * queue driver has set up
+		 */
+		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
+			       &num_planes, q->plane_sizes, q->alloc_ctx);
+
+		if (!ret && allocated_buffers < num_buffers)
+			ret = -ENOMEM;
+
+		/*
+		 * Either the driver has accepted a smaller number of buffers,
+		 * or .queue_setup() returned an error
+		 */
+	}
+
+	q->num_buffers += allocated_buffers;
+
+	if (ret < 0) {
+		__vb2_queue_free(q, allocated_buffers);
+		return ret;
+	}
+
+	/*
+	 * Return the number of successfully allocated buffers
+	 * to the userspace.
+	 */
+	create->count = allocated_buffers;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_create_bufs);
+
+/**
+ * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
+ * @vb:		vb2_buffer to which the plane in question belongs to
+ * @plane_no:	plane number for which the address is to be returned
+ *
+ * This function returns a kernel virtual address of a given plane if
+ * such a mapping exist, NULL otherwise.
+ */
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+
+	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+		return NULL;
+
+	return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
+
+}
+EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
+
+/**
+ * vb2_plane_cookie() - Return allocator specific cookie for the given plane
+ * @vb:		vb2_buffer to which the plane in question belongs to
+ * @plane_no:	plane number for which the cookie is to be returned
+ *
+ * This function returns an allocator specific cookie for a given plane if
+ * available, NULL otherwise. The allocator should provide some simple static
+ * inline function, which would convert this cookie to the allocator specific
+ * type that can be used directly by the driver to access the buffer. This can
+ * be for example physical address, pointer to scatter list or IOMMU mapping.
+ */
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+
+	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+		return NULL;
+
+	return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
+}
+EXPORT_SYMBOL_GPL(vb2_plane_cookie);
+
+/**
+ * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
+ * @vb:		vb2_buffer returned from the driver
+ * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully
+ *		or VB2_BUF_STATE_ERROR if the operation finished with an error
+ *
+ * This function should be called by the driver after a hardware operation on
+ * a buffer is finished and the buffer may be returned to userspace. The driver
+ * cannot use this buffer anymore until it is queued back to it by videobuf
+ * by the means of buf_queue callback. Only buffers previously queued to the
+ * driver by buf_queue can be passed to this function.
+ */
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned long flags;
+
+	if (vb->state != VB2_BUF_STATE_ACTIVE)
+		return;
+
+	if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
+		return;
+
+	dprintk(4, "Done processing on buffer %d, state: %d\n",
+			vb->v4l2_buf.index, vb->state);
+
+	/* Add the buffer to the done buffers list */
+	spin_lock_irqsave(&q->done_lock, flags);
+	vb->state = state;
+	list_add_tail(&vb->done_entry, &q->done_list);
+	atomic_dec(&q->queued_count);
+	spin_unlock_irqrestore(&q->done_lock, flags);
+
+	/* Inform any processes that may be waiting for buffers */
+	wake_up(&q->done_wq);
+}
+EXPORT_SYMBOL_GPL(vb2_buffer_done);
+
+/**
+ * __fill_vb2_buffer() - fill a vb2_buffer with information provided in
+ * a v4l2_buffer by the userspace
+ */
+static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
+				struct v4l2_plane *v4l2_planes)
+{
+	unsigned int plane;
+	int ret;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+		/*
+		 * Verify that the userspace gave us a valid array for
+		 * plane information.
+		 */
+		ret = __verify_planes_array(vb, b);
+		if (ret)
+			return ret;
+
+		/* Fill in driver-provided information for OUTPUT types */
+		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+			/*
+			 * Will have to go up to b->length when API starts
+			 * accepting variable number of planes.
+			 */
+			for (plane = 0; plane < vb->num_planes; ++plane) {
+				v4l2_planes[plane].bytesused =
+					b->m.planes[plane].bytesused;
+				v4l2_planes[plane].data_offset =
+					b->m.planes[plane].data_offset;
+			}
+		}
+
+		if (b->memory == V4L2_MEMORY_USERPTR) {
+			for (plane = 0; plane < vb->num_planes; ++plane) {
+				v4l2_planes[plane].m.userptr =
+					b->m.planes[plane].m.userptr;
+				v4l2_planes[plane].length =
+					b->m.planes[plane].length;
+			}
+		}
+	} else {
+		/*
+		 * Single-planar buffers do not use planes array,
+		 * so fill in relevant v4l2_buffer struct fields instead.
+		 * In videobuf we use our internal V4l2_planes struct for
+		 * single-planar buffers as well, for simplicity.
+		 */
+		if (V4L2_TYPE_IS_OUTPUT(b->type))
+			v4l2_planes[0].bytesused = b->bytesused;
+
+		if (b->memory == V4L2_MEMORY_USERPTR) {
+			v4l2_planes[0].m.userptr = b->m.userptr;
+			v4l2_planes[0].length = b->length;
+		}
+	}
+
+	vb->v4l2_buf.field = b->field;
+	vb->v4l2_buf.timestamp = b->timestamp;
+	vb->v4l2_buf.input = b->input;
+	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
+
+	return 0;
+}
+
+/**
+ * __qbuf_userptr() - handle qbuf of a USERPTR buffer
+ */
+static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	struct v4l2_plane planes[VIDEO_MAX_PLANES];
+	struct vb2_queue *q = vb->vb2_queue;
+	void *mem_priv;
+	unsigned int plane;
+	int ret;
+	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+	/* Verify and copy relevant information provided by the userspace */
+	ret = __fill_vb2_buffer(vb, b, planes);
+	if (ret)
+		return ret;
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		/* Skip the plane if already verified */
+		if (vb->v4l2_planes[plane].m.userptr &&
+		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
+		    && vb->v4l2_planes[plane].length == planes[plane].length)
+			continue;
+
+		dprintk(3, "qbuf: userspace address for plane %d changed, "
+				"reacquiring memory\n", plane);
+
+		/* Check if the provided plane buffer is large enough */
+		if (planes[plane].length < q->plane_sizes[plane]) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* Release previously acquired memory if present */
+		if (vb->planes[plane].mem_priv)
+			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+
+		vb->planes[plane].mem_priv = NULL;
+		vb->v4l2_planes[plane].m.userptr = 0;
+		vb->v4l2_planes[plane].length = 0;
+
+		/* Acquire each plane's memory */
+		mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
+				      planes[plane].m.userptr,
+				      planes[plane].length, write);
+		if (IS_ERR_OR_NULL(mem_priv)) {
+			dprintk(1, "qbuf: failed acquiring userspace "
+						"memory for plane %d\n", plane);
+			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
+			goto err;
+		}
+		vb->planes[plane].mem_priv = mem_priv;
+	}
+
+	/*
+	 * Call driver-specific initialization on the newly acquired buffer,
+	 * if provided.
+	 */
+	ret = call_qop(q, buf_init, vb);
+	if (ret) {
+		dprintk(1, "qbuf: buffer initialization failed\n");
+		goto err;
+	}
+
+	/*
+	 * Now that everything is in order, copy relevant information
+	 * provided by userspace.
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		vb->v4l2_planes[plane] = planes[plane];
+
+	return 0;
+err:
+	/* In case of errors, release planes that were already acquired */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		if (vb->planes[plane].mem_priv)
+			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+		vb->planes[plane].mem_priv = NULL;
+		vb->v4l2_planes[plane].m.userptr = 0;
+		vb->v4l2_planes[plane].length = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * __qbuf_mmap() - handle qbuf of an MMAP buffer
+ */
+static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	return __fill_vb2_buffer(vb, b, vb->v4l2_planes);
+}
+
+/**
+ * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
+ */
+static void __enqueue_in_driver(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+
+	vb->state = VB2_BUF_STATE_ACTIVE;
+	atomic_inc(&q->queued_count);
+	q->ops->buf_queue(vb);
+}
+
+static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	int ret;
+
+	switch (q->memory) {
+	case V4L2_MEMORY_MMAP:
+		ret = __qbuf_mmap(vb, b);
+		break;
+	case V4L2_MEMORY_USERPTR:
+		ret = __qbuf_userptr(vb, b);
+		break;
+	default:
+		WARN(1, "Invalid queue type\n");
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		ret = call_qop(q, buf_prepare, vb);
+	if (ret)
+		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
+	else
+		vb->state = VB2_BUF_STATE_PREPARED;
+
+	return ret;
+}
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to vidioc_prepare_buf
+ *		handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_prepare callback in the driver (if provided), in which
+ *    driver-specific buffer initialization can be performed,
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	if (q->fileio) {
+		dprintk(1, "%s(): file io in progress\n", __func__);
+		return -EBUSY;
+	}
+
+	if (b->type != q->type) {
+		dprintk(1, "%s(): invalid buffer type\n", __func__);
+		return -EINVAL;
+	}
+
+	if (b->index >= q->num_buffers) {
+		dprintk(1, "%s(): buffer index out of range\n", __func__);
+		return -EINVAL;
+	}
+
+	vb = q->bufs[b->index];
+	if (NULL == vb) {
+		/* Should never happen */
+		dprintk(1, "%s(): buffer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (b->memory != q->memory) {
+		dprintk(1, "%s(): invalid memory type\n", __func__);
+		return -EINVAL;
+	}
+
+	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+		dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
+		return -EINVAL;
+	}
+
+	ret = __buf_prepare(vb, b);
+	if (ret < 0)
+		return ret;
+
+	__fill_v4l2_buffer(vb, b);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to vidioc_qbuf handler
+ *		in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
+ *    which driver-specific buffer initialization can be performed,
+ * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
+ *    callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	struct rw_semaphore *mmap_sem = NULL;
+	struct vb2_buffer *vb;
+	int ret = 0;
+
+	/*
+	 * In case of user pointer buffers vb2 allocator needs to get direct
+	 * access to userspace pages. This requires getting read access on
+	 * mmap semaphore in the current process structure. The same
+	 * semaphore is taken before calling mmap operation, while both mmap
+	 * and qbuf are called by the driver or v4l2 core with driver's lock
+	 * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in
+	 * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
+	 * releases the driver's lock, takes mmap_sem and then takes the
+	 * driver's lock again.
+	 *
+	 * To avoid race with other vb2 calls, which might be called after
+	 * releasing driver's lock, this operation is performed at the
+	 * beginning of qbuf processing. This way the queue status is
+	 * consistent after getting driver's lock back.
+	 */
+	if (q->memory == V4L2_MEMORY_USERPTR) {
+		mmap_sem = &current->active_mm->mmap_sem;
+		call_qop(q, wait_prepare, q);
+		down_read(mmap_sem);
+		call_qop(q, wait_finish, q);
+	}
+
+	if (q->fileio) {
+		dprintk(1, "qbuf: file io in progress\n");
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	if (b->type != q->type) {
+		dprintk(1, "qbuf: invalid buffer type\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (b->index >= q->num_buffers) {
+		dprintk(1, "qbuf: buffer index out of range\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	vb = q->bufs[b->index];
+	if (NULL == vb) {
+		/* Should never happen */
+		dprintk(1, "qbuf: buffer is NULL\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (b->memory != q->memory) {
+		dprintk(1, "qbuf: invalid memory type\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	switch (vb->state) {
+	case VB2_BUF_STATE_DEQUEUED:
+		ret = __buf_prepare(vb, b);
+		if (ret)
+			goto unlock;
+	case VB2_BUF_STATE_PREPARED:
+		break;
+	default:
+		dprintk(1, "qbuf: buffer already in use\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/*
+	 * Add to the queued buffers list, a buffer will stay on it until
+	 * dequeued in dqbuf.
+	 */
+	list_add_tail(&vb->queued_entry, &q->queued_list);
+	vb->state = VB2_BUF_STATE_QUEUED;
+
+	/*
+	 * If already streaming, give the buffer to driver for processing.
+	 * If not, the buffer will be given to driver on next streamon.
+	 */
+	if (q->streaming)
+		__enqueue_in_driver(vb);
+
+	/* Fill buffer information for the userspace */
+	__fill_v4l2_buffer(vb, b);
+
+	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
+unlock:
+	if (mmap_sem)
+		up_read(mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_qbuf);
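+
+/*
+ * Illustrative usage sketch (not part of this change): a driver's
+ * vidioc_qbuf handler is typically a thin wrapper around vb2_qbuf().
+ * The "my_dev" structure and its "vb2_q" member are hypothetical
+ * placeholders for the driver's own state.
+ *
+ *	static int my_vidioc_qbuf(struct file *file, void *priv,
+ *				  struct v4l2_buffer *b)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_qbuf(&dev->vb2_q, b);
+ *	}
+ */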
+
+/**
+ * __vb2_wait_for_done_vb() - wait for a buffer to become available
+ * for dequeuing
+ *
+ * Will sleep if required for nonblocking == false.
+ */
+static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+{
+	/*
+	 * All operations on vb_done_list are performed under done_lock
+	 * spinlock protection. However, buffers may be removed from
+	 * it and returned to userspace only while holding both driver's
+	 * lock and the done_lock spinlock. Thus we can be sure that as
+	 * long as we hold the driver's lock, the list will not become
+	 * empty once the list_empty() check has shown it is non-empty.
+	 */
+
+	for (;;) {
+		int ret;
+
+		if (!q->streaming) {
+			dprintk(1, "Streaming off, will not wait for buffers\n");
+			return -EINVAL;
+		}
+
+		if (!list_empty(&q->done_list)) {
+			/*
+			 * Found a buffer that we were waiting for.
+			 */
+			break;
+		}
+
+		if (nonblocking) {
+			dprintk(1, "Nonblocking and no buffers to dequeue, "
+								"will not wait\n");
+			return -EAGAIN;
+		}
+
+		/*
+		 * We are streaming and blocking, wait for another buffer to
+		 * become ready or for streamoff. Driver's lock is released to
+		 * allow streamoff or qbuf to be called while waiting.
+		 */
+		call_qop(q, wait_prepare, q);
+
+		/*
+		 * All locks have been released, it is safe to sleep now.
+		 */
+		dprintk(3, "Will sleep waiting for buffers\n");
+		ret = wait_event_interruptible(q->done_wq,
+				!list_empty(&q->done_list) || !q->streaming);
+
+		/*
+		 * We need to re-evaluate both conditions after reacquiring
+		 * the locks, or return an error if one occurred while sleeping.
+		 */
+		call_qop(q, wait_finish, q);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * __vb2_get_done_vb() - get a buffer ready for dequeuing
+ *
+ * Will sleep if required for nonblocking == false.
+ */
+static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+				int nonblocking)
+{
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * Wait for at least one buffer to become available on the done_list.
+	 */
+	ret = __vb2_wait_for_done_vb(q, nonblocking);
+	if (ret)
+		return ret;
+
+	/*
+	 * Driver's lock has been held since we last verified that done_list
+	 * is not empty, so no need for another list_empty(done_list) check.
+	 */
+	spin_lock_irqsave(&q->done_lock, flags);
+	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
+	list_del(&(*vb)->done_entry);
+	spin_unlock_irqrestore(&q->done_lock, flags);
+
+	return 0;
+}
+
+/**
+ * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
+ * @q:		videobuf2 queue
+ *
+ * This function will wait until all buffers that have been given to the driver
+ * by buf_queue() are given back to vb2 with vb2_buffer_done(). It does not
+ * call the wait_prepare/wait_finish pair. It is intended to be called with
+ * all locks taken, for example from the stop_streaming() callback.
+ */
+int vb2_wait_for_all_buffers(struct vb2_queue *q)
+{
+	if (!q->streaming) {
+		dprintk(1, "Streaming off, will not wait for buffers\n");
+		return -EINVAL;
+	}
+
+	wait_event(q->done_wq, !atomic_read(&q->queued_count));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
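+
+/*
+ * Illustrative usage sketch (not part of this change): a natural place to
+ * call vb2_wait_for_all_buffers() is the driver's stop_streaming() callback,
+ * after the hardware has been stopped and every in-flight buffer has been
+ * returned with vb2_buffer_done(). "my_dev" and "my_hw_stop" are
+ * hypothetical names.
+ *
+ *	static int my_stop_streaming(struct vb2_queue *q)
+ *	{
+ *		struct my_dev *dev = vb2_get_drv_priv(q);
+ *
+ *		my_hw_stop(dev);
+ *		return vb2_wait_for_all_buffers(q);
+ *	}
+ */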
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
+ *		in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ *		 buffers ready for dequeuing are present. Normally the driver
+ *		 would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which
+ *    driver can perform any additional operations that may be required before
+ *    returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ *    the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+	struct vb2_buffer *vb = NULL;
+	int ret;
+
+	if (q->fileio) {
+		dprintk(1, "dqbuf: file io in progress\n");
+		return -EBUSY;
+	}
+
+	if (b->type != q->type) {
+		dprintk(1, "dqbuf: invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&q->q_lock);
+	ret = __vb2_get_done_vb(q, &vb, nonblocking);
+	if (ret < 0) {
+		dprintk(1, "dqbuf: error getting next done buffer\n");
+		mutex_unlock(&q->q_lock);
+		return ret;
+	}
+	mutex_unlock(&q->q_lock);
+	ret = call_qop(q, buf_finish, vb);
+	if (ret) {
+		dprintk(1, "dqbuf: buffer finish failed\n");
+		return ret;
+	}
+
+	switch (vb->state) {
+	case VB2_BUF_STATE_DONE:
+		dprintk(3, "dqbuf: Returning done buffer\n");
+		break;
+	case VB2_BUF_STATE_ERROR:
+		dprintk(3, "dqbuf: Returning done buffer with errors\n");
+		break;
+	default:
+		dprintk(1, "dqbuf: Invalid buffer state\n");
+		return -EINVAL;
+	}
+
+	/* Fill buffer information for the userspace */
+	__fill_v4l2_buffer(vb, b);
+	/* Remove from videobuf queue */
+	list_del(&vb->queued_entry);
+
+	dprintk(1, "dqbuf of buffer %d, with state %d\n",
+			vb->v4l2_buf.index, vb->state);
+
+	vb->state = VB2_BUF_STATE_DEQUEUED;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_dqbuf);
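+
+/*
+ * Illustrative usage sketch (not part of this change): a driver's
+ * vidioc_dqbuf handler usually just forwards the O_NONBLOCK flag of the
+ * file to vb2_dqbuf(). "my_dev" and "vb2_q" are hypothetical names.
+ *
+ *	static int my_vidioc_dqbuf(struct file *file, void *priv,
+ *				   struct v4l2_buffer *b)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_dqbuf(&dev->vb2_q, b, file->f_flags & O_NONBLOCK);
+ *	}
+ */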
+
+/**
+ * __vb2_queue_cancel() - cancel and stop (pause) streaming
+ *
+ * Removes all queued buffers from driver's queue and all buffers queued by
+ * userspace from videobuf's queue. Returns to state after reqbufs.
+ */
+static void __vb2_queue_cancel(struct vb2_queue *q)
+{
+	unsigned int i;
+
+	/*
+	 * Tell driver to stop all transactions and release all queued
+	 * buffers.
+	 */
+	if (q->streaming)
+		call_qop(q, stop_streaming, q);
+	q->streaming = 0;
+
+	/*
+	 * Remove all buffers from videobuf's list...
+	 */
+	INIT_LIST_HEAD(&q->queued_list);
+	/*
+	 * ...and done list; userspace will not receive any buffers it
+	 * has not already dequeued before initiating cancel.
+	 */
+	INIT_LIST_HEAD(&q->done_list);
+	atomic_set(&q->queued_count, 0);
+	wake_up_all(&q->done_wq);
+
+	/*
+	 * Reinitialize all buffers for next use.
+	 */
+	for (i = 0; i < q->num_buffers; ++i)
+		q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+}
+
+/**
+ * vb2_streamon - start streaming
+ * @q:		videobuf2 queue
+ * @type:	type argument passed from userspace to vidioc_streamon handler
+ *
+ * Should be called from vidioc_streamon handler of a driver.
+ * This function:
+ * 1) verifies current state
+ * 2) passes any previously queued buffers to the driver and starts streaming
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamon handler in the driver.
+ */
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	if (q->fileio) {
+		dprintk(1, "streamon: file io in progress\n");
+		return -EBUSY;
+	}
+
+	if (type != q->type) {
+		dprintk(1, "streamon: invalid stream type\n");
+		return -EINVAL;
+	}
+
+	if (q->streaming) {
+		dprintk(1, "streamon: already streaming\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * If any buffers were queued before streamon,
+	 * we can now pass them to driver for processing.
+	 */
+	list_for_each_entry(vb, &q->queued_list, queued_entry)
+		__enqueue_in_driver(vb);
+
+	/*
+	 * Let driver notice that streaming state has been enabled.
+	 */
+	mutex_lock(&q->q_lock);
+	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
+	if (ret) {
+		dprintk(1, "streamon: driver refused to start streaming\n");
+		__vb2_queue_cancel(q);
+		mutex_unlock(&q->q_lock);
+		return ret;
+	}
+
+	q->streaming = 1;
+	mutex_unlock(&q->q_lock);
+	dprintk(3, "Streamon successful\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q:		videobuf2 queue
+ * @type:	type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ * This function:
+ * 1) verifies current state,
+ * 2) stops streaming and dequeues any queued buffers, including those previously
+ *    passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from the vidioc_streamoff handler in the driver.
+ */
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+	if (q->fileio) {
+		dprintk(1, "streamoff: file io in progress\n");
+		return -EBUSY;
+	}
+
+	if (type != q->type) {
+		dprintk(1, "streamoff: invalid stream type\n");
+		return -EINVAL;
+	}
+
+	if (!q->streaming) {
+		dprintk(1, "streamoff: not streaming\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Cancel will pause streaming and remove all buffers from the driver
+	 * and videobuf, effectively returning control over them to userspace.
+	 */
+	__vb2_queue_cancel(q);
+
+	dprintk(3, "Streamoff successful\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_streamoff);
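+
+/*
+ * Illustrative usage sketch (not part of this change): the matching
+ * vidioc_streamon/vidioc_streamoff handlers of a driver are normally thin
+ * wrappers around vb2_streamon()/vb2_streamoff(). "my_dev" and "vb2_q"
+ * are hypothetical names.
+ *
+ *	static int my_vidioc_streamon(struct file *file, void *priv,
+ *				      enum v4l2_buf_type type)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_streamon(&dev->vb2_q, type);
+ *	}
+ *
+ *	static int my_vidioc_streamoff(struct file *file, void *priv,
+ *				       enum v4l2_buf_type type)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_streamoff(&dev->vb2_q, type);
+ *	}
+ */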
+
+/**
+ * __find_plane_by_offset() - find plane associated with the given offset off
+ */
+static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
+			unsigned int *_buffer, unsigned int *_plane)
+{
+	struct vb2_buffer *vb;
+	unsigned int buffer, plane;
+
+	/*
+	 * Go over all buffers and their planes, comparing the given offset
+	 * with an offset assigned to each plane. If a match is found,
+	 * return its buffer and plane numbers.
+	 */
+	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+		vb = q->bufs[buffer];
+
+		for (plane = 0; plane < vb->num_planes; ++plane) {
+			if (vb->v4l2_planes[plane].m.mem_offset == off) {
+				*_buffer = buffer;
+				*_plane = plane;
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * vb2_mmap() - map video buffers into application address space
+ * @q:		videobuf2 queue
+ * @vma:	vma passed to the mmap file operation handler in the driver
+ *
+ * Should be called from mmap file operation handler of a driver.
+ * This function maps one plane of one of the available video buffers to
+ * userspace. To map the whole video memory allocated on reqbufs, this function
+ * has to be called once per plane per previously allocated buffer.
+ *
+ * When the userspace application calls mmap, it passes an offset returned to
+ * it earlier by the vidioc_querybuf handler. That offset acts as a "cookie",
+ * which is then used to identify the plane to be mapped.
+ * This function finds the plane with the matching offset and performs the
+ * mapping by means of the provided memory operation.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the mmap handler in driver.
+ */
+int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+{
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct vb2_buffer *vb;
+	unsigned int buffer, plane;
+	int ret;
+
+	if (q->memory != V4L2_MEMORY_MMAP) {
+		dprintk(1, "Queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check memory area access mode.
+	 */
+	if (!(vma->vm_flags & VM_SHARED)) {
+		dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
+		return -EINVAL;
+	}
+	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+		if (!(vma->vm_flags & VM_WRITE)) {
+			dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
+			return -EINVAL;
+		}
+	} else {
+		if (!(vma->vm_flags & VM_READ)) {
+			dprintk(1, "Invalid vma flags, VM_READ needed\n");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Find the plane corresponding to the offset passed by userspace.
+	 */
+	ret = __find_plane_by_offset(q, off, &buffer, &plane);
+	if (ret)
+		return ret;
+
+	vb = q->bufs[buffer];
+
+	ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
+	if (ret)
+		return ret;
+
+	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_mmap);
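+
+/*
+ * Illustrative usage sketch (not part of this change): a driver's mmap
+ * file operation can delegate directly to vb2_mmap(); the offset encoded
+ * in vma->vm_pgoff selects which buffer/plane gets mapped. "my_dev" is a
+ * hypothetical name.
+ *
+ *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_mmap(&dev->vb2_q, vma);
+ *	}
+ */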
+
+#ifndef CONFIG_MMU
+unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+				    unsigned long addr,
+				    unsigned long len,
+				    unsigned long pgoff,
+				    unsigned long flags)
+{
+	unsigned long off = pgoff << PAGE_SHIFT;
+	struct vb2_buffer *vb;
+	unsigned int buffer, plane;
+	int ret;
+
+	if (q->memory != V4L2_MEMORY_MMAP) {
+		dprintk(1, "Queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Find the plane corresponding to the offset passed by userspace.
+	 */
+	ret = __find_plane_by_offset(q, off, &buffer, &plane);
+	if (ret)
+		return ret;
+
+	vb = q->bufs[buffer];
+
+	return (unsigned long)vb2_plane_vaddr(vb, plane);
+}
+EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
+#endif
+
+static int __vb2_init_fileio(struct vb2_queue *q, int read);
+static int __vb2_cleanup_fileio(struct vb2_queue *q);
+
+/**
+ * vb2_poll() - implements poll userspace operation
+ * @q:		videobuf2 queue
+ * @file:	file argument passed to the poll file operation handler
+ * @wait:	wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
+unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+{
+	unsigned long flags;
+	unsigned int ret;
+	struct vb2_buffer *vb = NULL;
+
+	/*
+	 * Start file I/O emulator only if streaming API has not been used yet.
+	 */
+	if (q->num_buffers == 0 && q->fileio == NULL) {
+		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ)) {
+			ret = __vb2_init_fileio(q, 1);
+			if (ret)
+				return POLLERR;
+		}
+		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE)) {
+			ret = __vb2_init_fileio(q, 0);
+			if (ret)
+				return POLLERR;
+			/*
+			 * Write to OUTPUT queue can be done immediately.
+			 */
+			return POLLOUT | POLLWRNORM;
+		}
+	}
+
+	/*
+	 * There is nothing to wait for if no buffers have already been queued.
+	 */
+	if (list_empty(&q->queued_list))
+		return POLLERR;
+
+	poll_wait(file, &q->done_wq, wait);
+
+	/*
+	 * Take first buffer available for dequeuing.
+	 */
+	spin_lock_irqsave(&q->done_lock, flags);
+	if (!list_empty(&q->done_list))
+		vb = list_first_entry(&q->done_list, struct vb2_buffer,
+					done_entry);
+	spin_unlock_irqrestore(&q->done_lock, flags);
+
+	if (vb && (vb->state == VB2_BUF_STATE_DONE
+			|| vb->state == VB2_BUF_STATE_ERROR)) {
+		return (V4L2_TYPE_IS_OUTPUT(q->type)) ? POLLOUT | POLLWRNORM :
+			POLLIN | POLLRDNORM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_poll);
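+
+/*
+ * Illustrative usage sketch (not part of this change): a driver's poll
+ * file operation can simply forward to vb2_poll(). "my_dev" is a
+ * hypothetical name.
+ *
+ *	static unsigned int my_poll(struct file *file, poll_table *wait)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_poll(&dev->vb2_q, file, wait);
+ *	}
+ */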
+
+/**
+ * vb2_queue_init() - initialize a videobuf2 queue
+ * @q:		videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible for clearing its contents and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
+int vb2_queue_init(struct vb2_queue *q)
+{
+	BUG_ON(!q);
+	BUG_ON(!q->ops);
+	BUG_ON(!q->mem_ops);
+	BUG_ON(!q->type);
+	BUG_ON(!q->io_modes);
+
+	BUG_ON(!q->ops->queue_setup);
+	BUG_ON(!q->ops->buf_queue);
+
+	INIT_LIST_HEAD(&q->queued_list);
+	INIT_LIST_HEAD(&q->done_list);
+	spin_lock_init(&q->done_lock);
+	mutex_init(&q->q_lock);
+	init_waitqueue_head(&q->done_wq);
+
+	if (q->buf_struct_size == 0)
+		q->buf_struct_size = sizeof(struct vb2_buffer);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init);
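+
+/*
+ * Illustrative usage sketch (not part of this change): minimal queue setup
+ * in a driver, assuming a hypothetical "my_vb2_ops" that implements at
+ * least queue_setup and buf_queue, and a hypothetical "dev" holding the
+ * vb2_queue:
+ *
+ *	memset(&dev->vb2_q, 0, sizeof(dev->vb2_q));
+ *	dev->vb2_q.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *	dev->vb2_q.io_modes = VB2_MMAP | VB2_USERPTR;
+ *	dev->vb2_q.drv_priv = dev;
+ *	dev->vb2_q.ops = &my_vb2_ops;
+ *	dev->vb2_q.mem_ops = &vb2_vmalloc_memops;
+ *	ret = vb2_queue_init(&dev->vb2_q);
+ */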
+
+/**
+ * vb2_queue_release() - stop streaming, release the queue and free memory
+ * @q:		videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
+void vb2_queue_release(struct vb2_queue *q)
+{
+	__vb2_cleanup_fileio(q);
+	__vb2_queue_cancel(q);
+	__vb2_queue_free(q, q->num_buffers);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_release);
+
+/**
+ * struct vb2_fileio_buf - buffer context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of streaming API. This structure is used for
+ * tracking context related to the buffers.
+ */
+struct vb2_fileio_buf {
+	void *vaddr;
+	unsigned int size;
+	unsigned int pos;
+	unsigned int queued:1;
+};
+
+/**
+ * struct vb2_fileio_data - queue context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. For proper operation it requires
+ * this structure to save the driver state between calls of the read
+ * or write functions.
+ */
+struct vb2_fileio_data {
+	struct v4l2_requestbuffers req;
+	struct v4l2_buffer b;
+	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
+	unsigned int index;
+	unsigned int q_count;
+	unsigned int dq_count;
+	unsigned int flags;
+};
+
+/**
+ * __vb2_init_fileio() - initialize file io emulator
+ * @q:		videobuf2 queue
+ * @read:	mode selector (1 means read, 0 means write)
+ */
+static int __vb2_init_fileio(struct vb2_queue *q, int read)
+{
+	struct vb2_fileio_data *fileio;
+	int i, ret;
+	unsigned int count = 0;
+
+	/*
+	 * Sanity check
+	 */
+	if ((read && !(q->io_modes & VB2_READ)) ||
+	   (!read && !(q->io_modes & VB2_WRITE)))
+		BUG();
+
+	/*
+	 * Check if device supports mapping buffers to kernel virtual space.
+	 */
+	if (!q->mem_ops->vaddr)
+		return -EBUSY;
+
+	/*
+	 * Check that the streaming API has not already been activated.
+	 */
+	if (q->streaming || q->num_buffers > 0)
+		return -EBUSY;
+
+	/*
+	 * Start with count 1, driver can increase it in queue_setup()
+	 */
+	count = 1;
+
+	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
+		(read) ? "read" : "write", count, q->io_flags);
+
+	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
+	if (fileio == NULL)
+		return -ENOMEM;
+
+	fileio->flags = q->io_flags;
+
+	/*
+	 * Request buffers and use MMAP type to force driver
+	 * to allocate buffers by itself.
+	 */
+	fileio->req.count = count;
+	fileio->req.memory = V4L2_MEMORY_MMAP;
+	fileio->req.type = q->type;
+	ret = vb2_reqbufs(q, &fileio->req);
+	if (ret)
+		goto err_kfree;
+
+	/*
+	 * Check if plane_count is correct
+	 * (multiplane buffers are not supported).
+	 */
+	if (q->bufs[0]->num_planes != 1) {
+		fileio->req.count = 0;
+		ret = -EBUSY;
+		goto err_reqbufs;
+	}
+
+	/*
+	 * Get kernel address of each buffer.
+	 */
+	for (i = 0; i < q->num_buffers; i++) {
+		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+		if (fileio->bufs[i].vaddr == NULL)
+			goto err_reqbufs;
+		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+	}
+
+	/*
+	 * Read mode requires pre queuing of all buffers.
+	 */
+	if (read) {
+		/*
+		 * Queue all buffers.
+		 */
+		for (i = 0; i < q->num_buffers; i++) {
+			struct v4l2_buffer *b = &fileio->b;
+			memset(b, 0, sizeof(*b));
+			b->type = q->type;
+			b->memory = q->memory;
+			b->index = i;
+			ret = vb2_qbuf(q, b);
+			if (ret)
+				goto err_reqbufs;
+			fileio->bufs[i].queued = 1;
+		}
+
+		/*
+		 * Start streaming.
+		 */
+		ret = vb2_streamon(q, q->type);
+		if (ret)
+			goto err_reqbufs;
+	}
+
+	q->fileio = fileio;
+
+	return ret;
+
+err_reqbufs:
+	vb2_reqbufs(q, &fileio->req);
+
+err_kfree:
+	kfree(fileio);
+	return ret;
+}
+
+/**
+ * __vb2_cleanup_fileio() - free resources used by the file io emulator
+ * @q:		videobuf2 queue
+ */
+static int __vb2_cleanup_fileio(struct vb2_queue *q)
+{
+	struct vb2_fileio_data *fileio = q->fileio;
+
+	if (fileio) {
+		/*
+		 * Hack fileio context to enable direct calls to vb2 ioctl
+		 * interface.
+		 */
+		q->fileio = NULL;
+
+		vb2_streamoff(q, q->type);
+		fileio->req.count = 0;
+		vb2_reqbufs(q, &fileio->req);
+		kfree(fileio);
+		dprintk(3, "file io emulator closed\n");
+	}
+	return 0;
+}
+
+/**
+ * __vb2_perform_fileio() - perform a single file io (read or write) operation
+ * @q:		videobuf2 queue
+ * @data:	pointer to the target userspace buffer
+ * @count:	number of bytes to read or write
+ * @ppos:	file handle position tracking pointer
+ * @nonblock:	mode selector (1 means non-blocking calls, 0 means blocking)
+ * @read:	access mode selector (1 means read, 0 means write)
+ */
+static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
+		loff_t *ppos, int nonblock, int read)
+{
+	struct vb2_fileio_data *fileio;
+	struct vb2_fileio_buf *buf;
+	int ret, index;
+
+	dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
+		read ? "read" : "write", (long)*ppos, count,
+		nonblock ? "non" : "");
+
+	if (!data)
+		return -EINVAL;
+
+	/*
+	 * Initialize emulator on first call.
+	 */
+	if (!q->fileio) {
+		ret = __vb2_init_fileio(q, read);
+		dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+		if (ret)
+			return ret;
+	}
+	fileio = q->fileio;
+
+	/*
+	 * Hack fileio context to enable direct calls to vb2 ioctl interface.
+	 * The pointer will be restored before returning from this function.
+	 */
+	q->fileio = NULL;
+
+	index = fileio->index;
+	buf = &fileio->bufs[index];
+
+	/*
+	 * Check if we need to dequeue the buffer.
+	 */
+	if (buf->queued) {
+		struct vb2_buffer *vb;
+
+		/*
+		 * Call vb2_dqbuf to get buffer back.
+		 */
+		memset(&fileio->b, 0, sizeof(fileio->b));
+		fileio->b.type = q->type;
+		fileio->b.memory = q->memory;
+		fileio->b.index = index;
+		ret = vb2_dqbuf(q, &fileio->b, nonblock);
+		dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+		if (ret)
+			goto end;
+		fileio->dq_count += 1;
+
+		/*
+		 * Get number of bytes filled by the driver
+		 */
+		vb = q->bufs[index];
+		buf->size = vb2_get_plane_payload(vb, 0);
+		buf->queued = 0;
+	}
+
+	/*
+	 * Limit count on last few bytes of the buffer.
+	 */
+	if (buf->pos + count > buf->size) {
+		count = buf->size - buf->pos;
+		dprintk(5, "reducing read count: %zd\n", count);
+	}
+
+	/*
+	 * Transfer data to userspace.
+	 */
+	dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
+		count, index, buf->pos);
+	if (read)
+		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
+	else
+		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
+	if (ret) {
+		dprintk(3, "file io: error copying data\n");
+		ret = -EFAULT;
+		goto end;
+	}
+
+	/*
+	 * Update counters.
+	 */
+	buf->pos += count;
+	*ppos += count;
+
+	/*
+	 * Queue next buffer if required.
+	 */
+	if (buf->pos == buf->size ||
+	   (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
+		/*
+		 * Check if this is the last buffer to read.
+		 */
+		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
+		    fileio->dq_count == 1) {
+			dprintk(3, "file io: read limit reached\n");
+			/*
+			 * Restore fileio pointer and release the context.
+			 */
+			q->fileio = fileio;
+			return __vb2_cleanup_fileio(q);
+		}
+
+		/*
+		 * Call vb2_qbuf and give buffer to the driver.
+		 */
+		memset(&fileio->b, 0, sizeof(fileio->b));
+		fileio->b.type = q->type;
+		fileio->b.memory = q->memory;
+		fileio->b.index = index;
+		fileio->b.bytesused = buf->pos;
+		ret = vb2_qbuf(q, &fileio->b);
+		dprintk(5, "file io: vb2_qbuf result: %d\n", ret);
+		if (ret)
+			goto end;
+
+		/*
+		 * Buffer has been queued, update the status
+		 */
+		buf->pos = 0;
+		buf->queued = 1;
+		buf->size = q->bufs[0]->v4l2_planes[0].length;
+		fileio->q_count += 1;
+
+		/*
+		 * Switch to the next buffer
+		 */
+		fileio->index = (index + 1) % q->num_buffers;
+
+		/*
+		 * Start streaming if required.
+		 */
+		if (!read && !q->streaming) {
+			ret = vb2_streamon(q, q->type);
+			if (ret)
+				goto end;
+		}
+	}
+
+	/*
+	 * Return proper number of bytes processed.
+	 */
+	if (ret == 0)
+		ret = count;
+end:
+	/*
+	 * Restore the fileio context and block vb2 ioctl interface.
+	 */
+	q->fileio = fileio;
+	return ret;
+}
+
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+		loff_t *ppos, int nonblocking)
+{
+	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
+}
+EXPORT_SYMBOL_GPL(vb2_read);
+
+size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+		loff_t *ppos, int nonblocking)
+{
+	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+}
+EXPORT_SYMBOL_GPL(vb2_write);
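+
+/*
+ * Illustrative usage sketch (not part of this change): the driver's
+ * read/write file operations can be built on top of the emulator.
+ * "my_dev" is a hypothetical name.
+ *
+ *	static ssize_t my_read(struct file *file, char __user *data,
+ *			       size_t count, loff_t *ppos)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_read(&dev->vb2_q, data, count, ppos,
+ *				file->f_flags & O_NONBLOCK);
+ *	}
+ */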
+
+MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf2-dma-contig.c b/include/media/videobuf2-dma-contig.c
new file mode 100755
index 0000000..4b71326
--- /dev/null
+++ b/include/media/videobuf2-dma-contig.c
@@ -0,0 +1,186 @@
+/*
+ * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_dc_conf {
+	struct device		*dev;
+};
+
+struct vb2_dc_buf {
+	struct vb2_dc_conf		*conf;
+	void				*vaddr;
+	dma_addr_t			dma_addr;
+	unsigned long			size;
+	struct vm_area_struct		*vma;
+	atomic_t			refcount;
+	struct vb2_vmarea_handler	handler;
+};
+
+static void vb2_dma_contig_put(void *buf_priv);
+
+static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+{
+	struct vb2_dc_conf *conf = alloc_ctx;
+	struct vb2_dc_buf *buf;
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
+					GFP_KERNEL);
+	if (!buf->vaddr) {
+		dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
+			size);
+		kfree(buf);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	buf->conf = conf;
+	buf->size = size;
+
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_dma_contig_put;
+	buf->handler.arg = buf;
+
+	atomic_inc(&buf->refcount);
+
+	return buf;
+}
+
+static void vb2_dma_contig_put(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	if (atomic_dec_and_test(&buf->refcount)) {
+		dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
+				  buf->dma_addr);
+		kfree(buf);
+	}
+}
+
+static void *vb2_dma_contig_cookie(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return &buf->dma_addr;
+}
+
+static void *vb2_dma_contig_vaddr(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	if (!buf)
+		return NULL;
+
+	return buf->vaddr;
+}
+
+static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return atomic_read(&buf->refcount);
+}
+
+static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	if (!buf) {
+		printk(KERN_ERR "No buffer to map\n");
+		return -EINVAL;
+	}
+
+	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
+				  &vb2_common_vm_ops, &buf->handler);
+}
+
+static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
+					unsigned long size, int write)
+{
+	struct vb2_dc_buf *buf;
+	struct vm_area_struct *vma;
+	dma_addr_t dma_addr = 0;
+	int ret;
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+	if (ret) {
+		printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
+				vaddr);
+		kfree(buf);
+		return ERR_PTR(ret);
+	}
+
+	buf->size = size;
+	buf->dma_addr = dma_addr;
+	buf->vma = vma;
+
+	return buf;
+}
+
+static void vb2_dma_contig_put_userptr(void *mem_priv)
+{
+	struct vb2_dc_buf *buf = mem_priv;
+
+	if (!buf)
+		return;
+
+	vb2_put_vma(buf->vma);
+	kfree(buf);
+}
+
+const struct vb2_mem_ops vb2_dma_contig_memops = {
+	.alloc		= vb2_dma_contig_alloc,
+	.put		= vb2_dma_contig_put,
+	.cookie		= vb2_dma_contig_cookie,
+	.vaddr		= vb2_dma_contig_vaddr,
+	.mmap		= vb2_dma_contig_mmap,
+	.get_userptr	= vb2_dma_contig_get_userptr,
+	.put_userptr	= vb2_dma_contig_put_userptr,
+	.num_users	= vb2_dma_contig_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
+
+void *vb2_dma_contig_init_ctx(struct device *dev)
+{
+	struct vb2_dc_conf *conf;
+
+	conf = kzalloc(sizeof *conf, GFP_KERNEL);
+	if (!conf)
+		return ERR_PTR(-ENOMEM);
+
+	conf->dev = dev;
+
+	return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
+
+void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
+{
+	kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
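+
+/*
+ * Illustrative usage sketch (not part of this change): a driver using this
+ * allocator creates one context per device, plugs the memops into its
+ * queue and obtains the DMA address of a plane through the cookie.
+ * "dev", "vb2_q" and "pdev" are hypothetical driver/platform fields.
+ *
+ *	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ *	dev->vb2_q.mem_ops = &vb2_dma_contig_memops;
+ *	...
+ *	dma_addr_t *addr = vb2_plane_cookie(vb, 0);
+ */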
+
+MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf2-dma-sg.c b/include/media/videobuf2-dma-sg.c
new file mode 100755
index 0000000..25c3b36
--- /dev/null
+++ b/include/media/videobuf2-dma-sg.c
@@ -0,0 +1,283 @@
+/*
+ * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+#include <media/videobuf2-dma-sg.h>
+
+struct vb2_dma_sg_buf {
+	void				*vaddr;
+	struct page			**pages;
+	int				write;
+	int				offset;
+	struct vb2_dma_sg_desc		sg_desc;
+	atomic_t			refcount;
+	struct vb2_vmarea_handler	handler;
+};
+
+static void vb2_dma_sg_put(void *buf_priv);
+
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
+{
+	struct vb2_dma_sg_buf *buf;
+	int i;
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->vaddr = NULL;
+	buf->write = 0;
+	buf->offset = 0;
+	buf->sg_desc.size = size;
+	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
+				      sizeof(*buf->sg_desc.sglist));
+	if (!buf->sg_desc.sglist)
+		goto fail_sglist_alloc;
+	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+
+	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+			     GFP_KERNEL);
+	if (!buf->pages)
+		goto fail_pages_array_alloc;
+
+	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
+		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
+		if (NULL == buf->pages[i])
+			goto fail_pages_alloc;
+		sg_set_page(&buf->sg_desc.sglist[i],
+			    buf->pages[i], PAGE_SIZE, 0);
+	}
+
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_dma_sg_put;
+	buf->handler.arg = buf;
+
+	atomic_inc(&buf->refcount);
+
+	printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
+		__func__, buf->sg_desc.num_pages);
+	return buf;
+
+fail_pages_alloc:
+	while (--i >= 0)
+		__free_page(buf->pages[i]);
+	kfree(buf->pages);
+
+fail_pages_array_alloc:
+	vfree(buf->sg_desc.sglist);
+
+fail_sglist_alloc:
+	kfree(buf);
+	return NULL;
+}
+
+static void vb2_dma_sg_put(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	int i = buf->sg_desc.num_pages;
+
+	if (atomic_dec_and_test(&buf->refcount)) {
+		printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
+			buf->sg_desc.num_pages);
+		if (buf->vaddr)
+			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+		vfree(buf->sg_desc.sglist);
+		while (--i >= 0)
+			__free_page(buf->pages[i]);
+		kfree(buf->pages);
+		kfree(buf);
+	}
+}
+
+static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
+				    unsigned long size, int write)
+{
+	struct vb2_dma_sg_buf *buf;
+	unsigned long first, last;
+	int num_pages_from_user, i;
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->vaddr = NULL;
+	buf->write = write;
+	buf->offset = vaddr & ~PAGE_MASK;
+	buf->sg_desc.size = size;
+
+	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
+	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
+	buf->sg_desc.num_pages = last - first + 1;
+
+	buf->sg_desc.sglist = vzalloc(
+		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
+	if (!buf->sg_desc.sglist)
+		goto userptr_fail_sglist_alloc;
+
+	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+
+	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+			     GFP_KERNEL);
+	if (!buf->pages)
+		goto userptr_fail_pages_array_alloc;
+
+	num_pages_from_user = get_user_pages(current, current->mm,
+					     vaddr & PAGE_MASK,
+					     buf->sg_desc.num_pages,
+					     write,
+					     1, /* force */
+					     buf->pages,
+					     NULL);
+
+	if (num_pages_from_user != buf->sg_desc.num_pages)
+		goto userptr_fail_get_user_pages;
+
+	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
+		    PAGE_SIZE - buf->offset, buf->offset);
+	size -= PAGE_SIZE - buf->offset;
+	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
+		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
+			    min_t(size_t, PAGE_SIZE, size), 0);
+		size -= min_t(size_t, PAGE_SIZE, size);
+	}
+	return buf;
+
+userptr_fail_get_user_pages:
+	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d\n",
+	       num_pages_from_user, buf->sg_desc.num_pages);
+	while (--num_pages_from_user >= 0)
+		put_page(buf->pages[num_pages_from_user]);
+	kfree(buf->pages);
+
+userptr_fail_pages_array_alloc:
+	vfree(buf->sg_desc.sglist);
+
+userptr_fail_sglist_alloc:
+	kfree(buf);
+	return NULL;
+}
+
+/*
+ * @put_userptr: inform the allocator that a USERPTR buffer will no longer
+ *		 be used
+ */
+static void vb2_dma_sg_put_userptr(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	int i = buf->sg_desc.num_pages;
+
+	printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
+	       __func__, buf->sg_desc.num_pages);
+	if (buf->vaddr)
+		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+	while (--i >= 0) {
+		if (buf->write)
+			set_page_dirty_lock(buf->pages[i]);
+		put_page(buf->pages[i]);
+	}
+	vfree(buf->sg_desc.sglist);
+	kfree(buf->pages);
+	kfree(buf);
+}
+
+static void *vb2_dma_sg_vaddr(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+
+	BUG_ON(!buf);
+
+	if (!buf->vaddr)
+		buf->vaddr = vm_map_ram(buf->pages,
+					buf->sg_desc.num_pages,
+					-1,
+					PAGE_KERNEL);
+
+	/* add offset in case userptr is not page-aligned */
+	return buf->vaddr + buf->offset;
+}
+
+static unsigned int vb2_dma_sg_num_users(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+
+	return atomic_read(&buf->refcount);
+}
+
+static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int i = 0;
+
+	if (!buf) {
+		printk(KERN_ERR "No memory to map\n");
+		return -EINVAL;
+	}
+
+	do {
+		int ret;
+
+		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
+		if (ret) {
+			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
+			return ret;
+		}
+
+		uaddr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+
+	/*
+	 * Use common vm_area operations to track buffer refcount.
+	 */
+	vma->vm_private_data	= &buf->handler;
+	vma->vm_ops		= &vb2_common_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	return 0;
+}
+
+static void *vb2_dma_sg_cookie(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+
+	return &buf->sg_desc;
+}
+
+const struct vb2_mem_ops vb2_dma_sg_memops = {
+	.alloc		= vb2_dma_sg_alloc,
+	.put		= vb2_dma_sg_put,
+	.get_userptr	= vb2_dma_sg_get_userptr,
+	.put_userptr	= vb2_dma_sg_put_userptr,
+	.vaddr		= vb2_dma_sg_vaddr,
+	.mmap		= vb2_dma_sg_mmap,
+	.num_users	= vb2_dma_sg_num_users,
+	.cookie		= vb2_dma_sg_cookie,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
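+
+/*
+ * Illustrative usage sketch (not part of this change): with this allocator
+ * the plane cookie is a struct vb2_dma_sg_desc describing the
+ * scatter/gather list, which the driver maps for DMA itself; "dev" below
+ * is whatever struct device the driver uses for DMA mapping.
+ *
+ *	struct vb2_dma_sg_desc *desc = vb2_plane_cookie(vb, 0);
+ *	int nents = dma_map_sg(dev, desc->sglist, desc->num_pages,
+ *			       DMA_FROM_DEVICE);
+ */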
+
+MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
+MODULE_AUTHOR("Andrzej Pietrasiewicz");
+MODULE_LICENSE("GPL");
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf2-memops.c b/include/media/videobuf2-memops.c
new file mode 100755
index 0000000..504cd4c
--- /dev/null
+++ b/include/media/videobuf2-memops.c
@@ -0,0 +1,227 @@
+/*
+ * videobuf2-memops.c - generic memory handling routines for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *	   Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+/**
+ * vb2_get_vma() - acquire and lock the virtual memory area
+ * @vma:	given virtual memory area
+ *
+ * This function attempts to acquire an area mapped in the userspace for
+ * the duration of a hardware operation. The area is "locked" by performing
+ * the same set of operation that are done when process calls fork() and
+ * memory areas are duplicated.
+ *
+ * Returns a copy of a virtual memory region on success or NULL.
+ */
+struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *vma_copy;
+
+	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+	if (vma_copy == NULL)
+		return NULL;
+
+	if (vma->vm_ops && vma->vm_ops->open)
+		vma->vm_ops->open(vma);
+
+	if (vma->vm_file)
+		get_file(vma->vm_file);
+
+	memcpy(vma_copy, vma, sizeof(*vma));
+
+	vma_copy->vm_mm = NULL;
+	vma_copy->vm_next = NULL;
+	vma_copy->vm_prev = NULL;
+
+	return vma_copy;
+}
+EXPORT_SYMBOL_GPL(vb2_get_vma);
+
+/**
+ * vb2_put_vma() - release a previously acquired virtual memory area
+ * @vma:	virtual memory region associated with the area to be released
+ *
+ * This function releases the previously acquired memory area after a hardware
+ * operation.
+ */
+void vb2_put_vma(struct vm_area_struct *vma)
+{
+	if (!vma)
+		return;
+
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+
+	if (vma->vm_file)
+		fput(vma->vm_file);
+
+	kfree(vma);
+}
+EXPORT_SYMBOL_GPL(vb2_put_vma);
+
+/**
+ * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
+ * @vaddr:	starting virtual address of the area to be verified
+ * @size:	size of the area
+ * @res_vma:	will return a locked copy of the struct vm_area for the given area
+ * @res_pa:	will return the physical address for the given vaddr
+ *
+ * This function will go through the memory area of size @size mapped at @vaddr
+ * and verify that the underlying physical pages are contiguous. If they are,
+ * the virtual memory area is locked, @res_vma is filled with the copy and
+ * @res_pa is set to the physical address of the buffer.
+ *
+ * Returns 0 on success.
+ */
+int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
+			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long offset, start, end;
+	unsigned long this_pfn, prev_pfn;
+	dma_addr_t pa = 0;
+
+	start = vaddr;
+	offset = start & ~PAGE_MASK;
+	end = start + size;
+
+	vma = find_vma(mm, start);
+
+	if (vma == NULL || vma->vm_end < end)
+		return -EFAULT;
+
+	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
+		int ret = follow_pfn(vma, start, &this_pfn);
+		if (ret)
+			return ret;
+
+		if (prev_pfn == 0)
+			pa = this_pfn << PAGE_SHIFT;
+		else if (this_pfn != prev_pfn + 1)
+			return -EFAULT;
+
+		prev_pfn = this_pfn;
+	}
+
+	/*
+	 * Memory is contiguous, lock the vma and return to the caller
+	 */
+	*res_vma = vb2_get_vma(vma);
+	if (*res_vma == NULL)
+		return -ENOMEM;
+
+	*res_pa = pa + offset;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
+
+/**
+ * vb2_mmap_pfn_range() - map physical pages to userspace
+ * @vma:	virtual memory region for the mapping
+ * @paddr:	starting physical address of the memory to be mapped
+ * @size:	size of the memory to be mapped
+ * @vm_ops:	vm operations to be assigned to the created area
+ * @priv:	private data to be associated with the area
+ *
+ * Returns 0 on success.
+ */
+int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
+				unsigned long size,
+				const struct vm_operations_struct *vm_ops,
+				void *priv)
+{
+	int ret;
+
+	size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
+				size, vma->vm_page_prot);
+	if (ret) {
+		printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
+		return ret;
+	}
+
+	vma->vm_flags		|= VM_DONTEXPAND | VM_RESERVED;
+	vma->vm_private_data	= priv;
+	vma->vm_ops		= vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
+			__func__, paddr, vma->vm_start, size);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
+
+/**
+ * vb2_common_vm_open() - increase refcount of the vma
+ * @vma:	virtual memory region for the mapping
+ *
+ * This function adds another user to the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_open(struct vm_area_struct *vma)
+{
+	struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+	       __func__, h, atomic_read(h->refcount), vma->vm_start,
+	       vma->vm_end);
+
+	atomic_inc(h->refcount);
+}
+
+/**
+ * vb2_common_vm_close() - decrease refcount of the vma
+ * @vma:	virtual memory region for the mapping
+ *
+ * This function releases the user from the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_close(struct vm_area_struct *vma)
+{
+	struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+	       __func__, h, atomic_read(h->refcount), vma->vm_start,
+	       vma->vm_end);
+
+	h->put(h->arg);
+}
+
+/**
+ * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
+ * video buffers
+ */
+const struct vm_operations_struct vb2_common_vm_ops = {
+	.open = vb2_common_vm_open,
+	.close = vb2_common_vm_close,
+};
+EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
+
+MODULE_DESCRIPTION("common memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
old mode 100644
new mode 100755
diff --git a/include/media/videobuf2-msm-mem.c b/include/media/videobuf2-msm-mem.c
new file mode 100755
index 0000000..d8b42b8
--- /dev/null
+++ b/include/media/videobuf2-msm-mem.c
@@ -0,0 +1,351 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * Based on videobuf-dma-contig.c,
+ * (c) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * helper functions for physically contiguous pmem capture buffers
+ * The functions support contiguous memory allocations using pmem
+ * kernel API.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/android_pmem.h>
+#include <linux/memory_alloc.h>
+#include <media/videobuf2-msm-mem.h>
+#include <media/msm_camera.h>
+#include <mach/memory.h>
+#include <media/videobuf2-core.h>
+#include <mach/iommu_domains.h>
+
+#define MAGIC_PMEM 0x0733ac64
+#define MAGIC_CHECK(is, should)               \
+	if (unlikely((is) != (should))) {           \
+		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
+		BUG();                  \
+	}
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) pr_debug("videobuf-msm-mem: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+
+static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem)
+{
+	unsigned long phyaddr;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	int rc, len;
+	mem->client = msm_ion_client_create(-1, "camera");
+	if (IS_ERR((void *)mem->client)) {
+		pr_err("%s Could not create client\n", __func__);
+		goto client_failed;
+	}
+	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
+		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
+	if (IS_ERR((void *)mem->ion_handle)) {
+		pr_err("%s Could not allocate\n", __func__);
+		goto alloc_failed;
+	}
+	rc = ion_map_iommu(mem->client, mem->ion_handle,
+			CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0,
+			(unsigned long *)&phyaddr,
+			(unsigned long *)&len, UNCACHED, 0);
+	if (rc < 0) {
+		pr_err("%s Could not get physical address\n", __func__);
+		goto phys_failed;
+	}
+#else
+	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
+#endif
+	return phyaddr;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+phys_failed:
+	ion_free(mem->client, mem->ion_handle);
+alloc_failed:
+	ion_client_destroy(mem->client);
+client_failed:
+	return 0;
+#endif
+}
+
+static int32_t msm_mem_free(struct videobuf2_contig_pmem *mem)
+{
+	int32_t rc = 0;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	ion_unmap_iommu(mem->client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL);
+	ion_free(mem->client, mem->ion_handle);
+	ion_client_destroy(mem->client);
+#else
+	free_contiguous_memory_by_paddr(mem->phyaddr);
+#endif
+	return rc;
+}
+
+static void videobuf2_vm_close(struct vm_area_struct *vma)
+{
+	struct videobuf2_contig_pmem *mem = vma->vm_private_data;
+	D("vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+		mem, mem->count, vma->vm_start, vma->vm_end);
+	mem->count--;
+}
+static void videobuf2_vm_open(struct vm_area_struct *vma)
+{
+	struct videobuf2_contig_pmem *mem = vma->vm_private_data;
+	D("vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+		mem, mem->count, vma->vm_start, vma->vm_end);
+	mem->count++;
+}
+
+static const struct vm_operations_struct videobuf2_vm_ops = {
+	.open     = videobuf2_vm_open,
+	.close    = videobuf2_vm_close,
+};
+
+static void *msm_vb2_mem_ops_alloc(void *alloc_ctx, unsigned long size)
+{
+	struct videobuf2_contig_pmem *mem;
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	mem->magic = MAGIC_PMEM;
+	mem->size =  PAGE_ALIGN(size);
+	mem->alloc_ctx = alloc_ctx;
+	mem->is_userptr = 0;
+	mem->phyaddr = msm_mem_allocate(mem);
+	if (!mem->phyaddr) {
+		pr_err("%s : pmem memory allocation failed\n", __func__);
+		kfree(mem);
+		return ERR_PTR(-ENOMEM);
+	}
+	mem->mapped_phyaddr = mem->phyaddr;
+	return mem;
+}
+static void msm_vb2_mem_ops_put(void *buf_priv)
+{
+	struct videobuf2_contig_pmem *mem = buf_priv;
+	if (!mem->is_userptr) {
+		D("%s Freeing memory ", __func__);
+		msm_mem_free(mem);
+	}
+	kfree(mem);
+}
+int videobuf2_pmem_contig_mmap_get(struct videobuf2_contig_pmem *mem,
+				struct videobuf2_msm_offset *offset,
+				enum videobuf2_buffer_type buffer_type,
+				int path)
+{
+	if (offset)
+		mem->offset = *offset;
+	else
+		memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset));
+	mem->buffer_type = buffer_type;
+	mem->path = path;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf2_pmem_contig_mmap_get);
+
+/**
+ * videobuf2_pmem_contig_user_get() - setup user space memory pointer
+ * @mem: per-buffer private videobuf2-contig-pmem data
+ * @offset: buffer offsets to record in @mem (may be NULL)
+ * @buffer_type: type of the buffer being set up
+ * @addr_offset: offset added to the mapped physical address
+ * @path: path identifier recorded in @mem
+ * @client: ion client used to import and map the user buffer
+ *
+ * This function validates and sets up a pointer to user space memory.
+ * Only physically contiguous pfn-mapped memory is accepted.
+ *
+ * Returns 0 if successful.
+ */
+int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem,
+					struct videobuf2_msm_offset *offset,
+					enum videobuf2_buffer_type buffer_type,
+					uint32_t addr_offset, int path,
+					struct ion_client *client)
+{
+	unsigned long len;
+	int rc = 0;
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
+	unsigned long kvstart;
+#endif
+	unsigned long paddr = 0;
+	if (mem->phyaddr != 0)
+		return 0;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+	mem->ion_handle = ion_import_dma_buf(client, (int)mem->vaddr);
+	if (IS_ERR_OR_NULL(mem->ion_handle)) {
+		pr_err("%s ION import failed\n", __func__);
+		return PTR_ERR(mem->ion_handle);
+	}
+	rc = ion_map_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL,
+		SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, UNCACHED, 0);
+	if (rc < 0)
+		ion_free(client, mem->ion_handle);
+#elif defined(CONFIG_ANDROID_PMEM)
+	rc = get_pmem_file((int)mem->vaddr, (unsigned long *)&mem->phyaddr,
+					&kvstart, &len, &mem->file);
+	if (rc < 0) {
+		pr_err("%s: get_pmem_file fd %d error %d\n",
+					__func__, (int)mem->vaddr, rc);
+		return rc;
+	}
+#else
+	paddr = 0;
+	kvstart = 0;
+#endif
+	if (offset)
+		mem->offset = *offset;
+	else
+		memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset));
+	mem->path = path;
+	mem->buffer_type = buffer_type;
+	paddr = mem->phyaddr;
+	mem->mapped_phyaddr = paddr + addr_offset;
+	mem->addr_offset = addr_offset;
+	return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf2_pmem_contig_user_get);
+
+void videobuf2_pmem_contig_user_put(struct videobuf2_contig_pmem *mem,
+					struct ion_client *client)
+{
+	if (mem->is_userptr) {
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+		ion_unmap_iommu(client, mem->ion_handle,
+				CAMERA_DOMAIN, GEN_POOL);
+		ion_free(client, mem->ion_handle);
+#elif defined(CONFIG_ANDROID_PMEM)
+		put_pmem_file(mem->file);
+#endif
+	}
+	mem->is_userptr = 0;
+	mem->phyaddr = 0;
+	mem->size = 0;
+	mem->mapped_phyaddr = 0;
+}
+EXPORT_SYMBOL_GPL(videobuf2_pmem_contig_user_put);
+
+static void *msm_vb2_mem_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
+					unsigned long size, int write)
+{
+	struct videobuf2_contig_pmem *mem;
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+	mem->magic = MAGIC_PMEM;
+	mem->is_userptr = 1;
+	mem->vaddr = (void *)vaddr;
+	mem->size = size;
+	mem->alloc_ctx = alloc_ctx;
+	return mem;
+}
+static void msm_vb2_mem_ops_put_userptr(void *buf_priv)
+{
+	kfree(buf_priv);
+}
+
+static void *msm_vb2_mem_ops_vaddr(void *buf_priv)
+{
+	struct videobuf2_contig_pmem *mem = buf_priv;
+	return mem->vaddr;
+}
+static void *msm_vb2_mem_ops_cookie(void *buf_priv)
+{
+	return buf_priv;
+}
+static unsigned int msm_vb2_mem_ops_num_users(void *buf_priv)
+{
+	struct videobuf2_contig_pmem *mem = buf_priv;
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+	return mem->count;
+}
+static int msm_vb2_mem_ops_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct videobuf2_contig_pmem *mem;
+	int retval;
+	unsigned long size;
+	D("%s\n", __func__);
+	mem = buf_priv;
+	D("mem = 0x%x\n", (u32)mem);
+	BUG_ON(!mem);
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+	/* Try to remap memory */
+	size = vma->vm_end - vma->vm_start;
+	size = (size < mem->size) ? size : mem->size;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	retval = remap_pfn_range(vma, vma->vm_start,
+			mem->phyaddr >> PAGE_SHIFT,
+			size, vma->vm_page_prot);
+	if (retval) {
+		pr_err("mmap: remap failed with error %d\n", retval);
+		goto error;
+	}
+	mem->vaddr = (void *)vma->vm_start;
+	vma->vm_ops = &videobuf2_vm_ops;
+	vma->vm_flags |= VM_DONTEXPAND;
+	vma->vm_private_data = mem;
+
+	D("mmap %p: %08lx-%08lx (%lx) pgoff %08lx\n",
+		vma, vma->vm_start, vma->vm_end,
+		(long int)mem->size, vma->vm_pgoff);
+	videobuf2_vm_open(vma);
+	return 0;
+error:
+	return -ENOMEM;
+}
+
+static struct vb2_mem_ops msm_vb2_mem_ops = {
+	.alloc = msm_vb2_mem_ops_alloc,
+	.put = msm_vb2_mem_ops_put,
+	.get_userptr = msm_vb2_mem_ops_get_userptr,
+	.put_userptr = msm_vb2_mem_ops_put_userptr,
+	.vaddr = msm_vb2_mem_ops_vaddr,
+	.cookie = msm_vb2_mem_ops_cookie,
+	.num_users = msm_vb2_mem_ops_num_users,
+	.mmap = msm_vb2_mem_ops_mmap
+};
+
+void videobuf2_queue_pmem_contig_init(struct vb2_queue *q,
+					enum v4l2_buf_type type,
+					const struct vb2_ops *ops,
+					unsigned int size,
+					void *priv)
+{
+	memset(q, 0, sizeof(struct vb2_queue));
+	q->mem_ops = &msm_vb2_mem_ops;
+	q->ops = ops;
+	q->drv_priv = priv;
+	q->type = type;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->io_flags = 0;
+	q->buf_struct_size = size;
+	vb2_queue_init(q);
+}
+EXPORT_SYMBOL_GPL(videobuf2_queue_pmem_contig_init);
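+
+/*
+ * Illustrative usage sketch (not part of this change): an MSM camera driver
+ * initializes its queue with this helper and later looks up the mapped
+ * physical address of a queued plane. "my_vb2_ops", "pcam_inst" and
+ * "struct my_frame_buffer" (a driver buffer structure whose first member
+ * must be a struct vb2_buffer) are hypothetical names.
+ *
+ *	videobuf2_queue_pmem_contig_init(&pcam_inst->vb2_q,
+ *			V4L2_BUF_TYPE_VIDEO_CAPTURE, &my_vb2_ops,
+ *			sizeof(struct my_frame_buffer), pcam_inst);
+ *	...
+ *	unsigned long paddr = videobuf2_to_pmem_contig(vb, 0);
+ */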
+
+unsigned long videobuf2_to_pmem_contig(struct vb2_buffer *vb,
+				unsigned int plane_no)
+{
+	struct videobuf2_contig_pmem *mem;
+	mem = vb2_plane_cookie(vb, plane_no);
+	BUG_ON(!mem);
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+	return mem->mapped_phyaddr;
+}
+EXPORT_SYMBOL_GPL(videobuf2_to_pmem_contig);
+
+MODULE_DESCRIPTION("helper module to manage video4linux PMEM contig buffers");
+MODULE_LICENSE("GPL v2");
diff --git a/include/media/videobuf2-msm-mem.h b/include/media/videobuf2-msm-mem.h
old mode 100644
new mode 100755
index 84e2bea..080ea03
--- a/include/media/videobuf2-msm-mem.h
+++ b/include/media/videobuf2-msm-mem.h
@@ -17,7 +17,7 @@
 
 #include <media/videobuf2-core.h>
 #include <mach/iommu_domains.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 
 struct videobuf2_mapping {
 	unsigned int count;
@@ -74,7 +74,7 @@
 					uint32_t addr_offset, int path,
 					struct ion_client *client);
 void videobuf2_pmem_contig_user_put(struct videobuf2_contig_pmem *mem,
-					struct ion_client *client);
+				struct ion_client *client);
 unsigned long videobuf2_to_pmem_contig(struct vb2_buffer *buf,
 					unsigned int plane_no);
 
diff --git a/include/media/videobuf2-vmalloc.c b/include/media/videobuf2-vmalloc.c
new file mode 100755
index 0000000..6b5ca6c
--- /dev/null
+++ b/include/media/videobuf2-vmalloc.c
@@ -0,0 +1,222 @@
+/*
+ * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_vmalloc_buf {
+	void				*vaddr;
+	struct page			**pages;
+	struct vm_area_struct		*vma;
+	int				write;
+	unsigned long			size;
+	unsigned int			n_pages;
+	atomic_t			refcount;
+	struct vb2_vmarea_handler	handler;
+};
+
+static void vb2_vmalloc_put(void *buf_priv);
+
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
+{
+	struct vb2_vmalloc_buf *buf;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->size = size;
+	buf->vaddr = vmalloc_user(buf->size);
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_vmalloc_put;
+	buf->handler.arg = buf;
+
+	if (!buf->vaddr) {
+		pr_debug("vmalloc of size %ld failed\n", buf->size);
+		kfree(buf);
+		return NULL;
+	}
+
+	atomic_inc(&buf->refcount);
+	return buf;
+}
+
+static void vb2_vmalloc_put(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+
+	if (atomic_dec_and_test(&buf->refcount)) {
+		vfree(buf->vaddr);
+		kfree(buf);
+	}
+}
+
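+/*
+ * Two USERPTR cases are handled below: a physically contiguous VM_PFNMAP
+ * mapping is resolved with vb2_get_contig_userptr() and mapped through
+ * ioremap_nocache(), while ordinary user memory is pinned with
+ * get_user_pages() and mapped into the kernel with vm_map_ram().
+ */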
+static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+				     unsigned long size, int write)
+{
+	struct vb2_vmalloc_buf *buf;
+	unsigned long first, last;
+	int n_pages, offset;
+	struct vm_area_struct *vma;
+	dma_addr_t physp;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->write = write;
+	offset = vaddr & ~PAGE_MASK;
+	buf->size = size;
+
+	vma = find_vma(current->mm, vaddr);
+	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
+		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
+			goto fail_pages_array_alloc;
+		buf->vma = vma;
+		buf->vaddr = ioremap_nocache(physp, size);
+		if (!buf->vaddr)
+			goto fail_pages_array_alloc;
+	} else {
+		first = vaddr >> PAGE_SHIFT;
+		last  = (vaddr + size - 1) >> PAGE_SHIFT;
+		buf->n_pages = last - first + 1;
+		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
+				     GFP_KERNEL);
+		if (!buf->pages)
+			goto fail_pages_array_alloc;
+
+		/* current->mm->mmap_sem is taken by videobuf2 core */
+		n_pages = get_user_pages(current, current->mm,
+					 vaddr & PAGE_MASK, buf->n_pages,
+					 write, 1, /* force */
+					 buf->pages, NULL);
+		if (n_pages != buf->n_pages)
+			goto fail_get_user_pages;
+
+		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+					PAGE_KERNEL);
+		if (!buf->vaddr)
+			goto fail_get_user_pages;
+	}
+
+	buf->vaddr += offset;
+	return buf;
+
+fail_get_user_pages:
+	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
+		 buf->n_pages);
+	while (--n_pages >= 0)
+		put_page(buf->pages[n_pages]);
+	kfree(buf->pages);
+
+fail_pages_array_alloc:
+	kfree(buf);
+
+	return NULL;
+}
+
+static void vb2_vmalloc_put_userptr(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
+	unsigned int i;
+
+	if (buf->pages) {
+		if (vaddr)
+			vm_unmap_ram((void *)vaddr, buf->n_pages);
+		for (i = 0; i < buf->n_pages; ++i) {
+			if (buf->write)
+				set_page_dirty_lock(buf->pages[i]);
+			put_page(buf->pages[i]);
+		}
+		kfree(buf->pages);
+	} else {
+		if (buf->vma)
+			vb2_put_vma(buf->vma);
+		iounmap(buf->vaddr);
+	}
+	kfree(buf);
+}
+
+static void *vb2_vmalloc_vaddr(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+
+	if (!buf->vaddr) {
+		pr_err("Address of an unallocated plane requested "
+		       "or cannot map user pointer\n");
+		return NULL;
+	}
+
+	return buf->vaddr;
+}
+
+static unsigned int vb2_vmalloc_num_users(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	return atomic_read(&buf->refcount);
+}
+
+static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	int ret;
+
+	if (!buf) {
+		pr_err("No memory to map\n");
+		return -EINVAL;
+	}
+
+	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
+	if (ret) {
+		pr_err("Remapping vmalloc memory, error: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Make sure that vm_areas for 2 buffers won't be merged together
+	 */
+	vma->vm_flags		|= VM_DONTEXPAND;
+
+	/*
+	 * Use common vm_area operations to track buffer refcount.
+	 */
+	vma->vm_private_data	= &buf->handler;
+	vma->vm_ops		= &vb2_common_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	return 0;
+}
+
+const struct vb2_mem_ops vb2_vmalloc_memops = {
+	.alloc		= vb2_vmalloc_alloc,
+	.put		= vb2_vmalloc_put,
+	.get_userptr	= vb2_vmalloc_get_userptr,
+	.put_userptr	= vb2_vmalloc_put_userptr,
+	.vaddr		= vb2_vmalloc_vaddr,
+	.mmap		= vb2_vmalloc_mmap,
+	.num_users	= vb2_vmalloc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
+
+MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
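
vb2_vmalloc_memops is wired up like any other vb2 allocator: the bridge driver points q->mem_ops at it before calling vb2_queue_init(), and plane kernel addresses are then read back with vb2_plane_vaddr(). A minimal sketch, with hypothetical driver names (my_vb2_ops, struct my_buffer) that are not part of this patch:

	struct vb2_queue q;

	memset(&q, 0, sizeof(q));
	q.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q.io_modes = VB2_MMAP | VB2_USERPTR;
	q.ops = &my_vb2_ops;			/* driver-supplied vb2_ops */
	q.mem_ops = &vb2_vmalloc_memops;
	q.buf_struct_size = sizeof(struct my_buffer);
	vb2_queue_init(&q);
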
diff --git a/include/media/videobuf2-vmalloc.h b/include/media/videobuf2-vmalloc.h
old mode 100644
new mode 100755