/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *           Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/omap-pm.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct isp_format_info formats[] = {
	{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, },
	{ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
	  V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, },
	{ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
	  V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, },
	{ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, },
	{ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, },
	{ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, },
	{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, },
	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, },
	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, },
	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, },
	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, },
	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, },
	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, },
	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, },
	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, },
	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
	  V4L2_MBUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, },
	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
	  V4L2_MBUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, },
};

const struct isp_format_info *
omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}
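
/*
 * Illustrative note (not from the original source): callers use this helper to
 * look up per-format metadata from a media bus code. For instance, passing
 * V4L2_MBUS_FMT_SGRBG10_1X10 returns the table entry above with
 * pixelformat == V4L2_PIX_FMT_SGRBG10 and bpp == 10, while an unknown code
 * returns NULL and must be checked by the caller.
 */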

/*
 * Decide whether the desired output pixel code can be obtained with
 * the lane shifter by shifting the input pixel code.
 * @in: input pixelcode to shifter
 * @out: output pixelcode from shifter
 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
 *
 * Return true if the combination is possible, false otherwise.
 */
static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
				   enum v4l2_mbus_pixelcode out,
				   unsigned int additional_shift)
{
	const struct isp_format_info *in_info, *out_info;

	if (in == out)
		return true;

	in_info = omap3isp_video_format_info(in);
	out_info = omap3isp_video_format_info(out);

	if ((in_info->flavor == 0) || (out_info->flavor == 0))
		return false;

	if (in_info->flavor != out_info->flavor)
		return false;

	return in_info->bpp - out_info->bpp + additional_shift <= 6;
}
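
/*
 * Illustrative worked example (not part of the original source): with a
 * 12-bit Bayer input and an 8-bit Bayer output of the same flavor and no
 * additional shift, the check evaluates 12 - 8 + 0 = 4 <= 6, so the
 * combination is shiftable. Formats whose flavor field is 0 in the table
 * above (e.g. the packed YUV entries) fail the earlier flavor checks and are
 * never considered shiftable.
 */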

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable
	 * line sizes. Override the requested value with the minimum in that
	 * case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
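
/*
 * Illustrative worked example (not part of the original source): for a 10-bit
 * Bayer media bus format 640 pixels wide, each sample occupies 16 bits in
 * memory (ALIGN(10, 8) = 16), so min_bpl = 640 * 16 / 8 = 1280 bytes. If
 * userspace requested bytesperline = 1300 and the module allows configurable
 * line sizes, the function returns 1300 - 1280 = 20 padding bytes per line
 * (assuming 1300 already satisfies the module's bpl_alignment, otherwise the
 * value is first rounded up).
 */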

static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_source(&video->pad);

	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static struct isp_video *
isp_video_far_end(struct isp_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		far_end = to_isp_video(media_entity_to_video_device(entity));
		if (far_end->type != video->type)
			break;

		far_end = NULL;
	}

	mutex_unlock(&mdev->graph_mutex);
	return far_end;
}

/*
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends or if the pipeline doesn't start with a
 * video source (either a subdev with no input pad, or a non-subdev entity).
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	pipe->max_rate = pipe->l3_ick;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	while (1) {
		unsigned int shifter_link;

		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		fmt_sink.pad = pad->index;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Check ccdc maximum data rate when data comes from sensor
		 * TODO: Include ccdc rate in pipe->max_rate and compare the
		 * total pipe rate with the input data rate from sensor.
		 */
		if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
			unsigned int rate = UINT_MAX;

			omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
			if (isp->isp_ccdc.vpcfg.pixelclk > rate)
				return -ENOSPC;
		}

		/* If sink pad is on CCDC, the link has the lane shifter
		 * in the middle of it.
		 */
		shifter_link = subdev == &isp->isp_ccdc.subdev;

		/* Retrieve the source format. Return an error if no source
		 * entity can be found, and stop checking the pipeline if the
		 * source entity isn't a subdev.
		 */
		pad = media_entity_remote_source(pad);
		if (pad == NULL)
			return -EPIPE;

		if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);

		fmt_source.pad = pad->index;
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;

		if (shifter_link) {
			unsigned int parallel_shift = 0;

			if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
				struct isp_parallel_platform_data *pdata =
					&((struct isp_v4l2_subdevs_group *)
					  subdev->host_priv)->bus.parallel;
				parallel_shift = pdata->data_lane_shift * 2;
			}
			if (!isp_video_is_shiftable(fmt_source.format.code,
						    fmt_sink.format.code,
						    parallel_shift))
				return -EPIPE;
		} else if (fmt_source.format.code != fmt_sink.format.code)
			return -EPIPE;
	}

	return 0;
}

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret == -ENOIOCTLCMD)
		ret = -EINVAL;

	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
		return -EINVAL;

	return ret;
}

/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG	(IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter-gather list
 * @isp: Pointer to the OMAP3 ISP device
 * @sglist: Pointer to the source scatter-gather list to map
 * @sglen: Number of elements in the scatter-gather list
 *
 * Return the device address mapped by the ISP MMU, or -ENOMEM if we ran out
 * of memory.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
	struct sg_table *sgt;
	u32 da;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return -ENOMEM;

	sgt->sgl = (struct scatterlist *)sglist;
	sgt->nents = sglen;
	sgt->orig_nents = sglen;

	da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
	if (IS_ERR_VALUE(da))
		kfree(sgt);

	return da;
}

/*
 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 * @isp: Pointer to the OMAP3 ISP device
 * @da: Device address generated from an ispmmu_vmap call
 */
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
	struct sg_table *sgt;

	sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
	kfree(sgt);
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static void isp_video_queue_prepare(struct isp_video_queue *queue,
				    unsigned int *nbuffers, unsigned int *size)
{
	struct isp_video_fh *vfh =
		container_of(queue, struct isp_video_fh, queue);
	struct isp_video *video = vfh->video;

	*size = vfh->format.fmt.pix.sizeimage;
	if (*size == 0)
		return;

	*nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
}

static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;

	if (buffer->isp_addr) {
		ispmmu_vunmap(video->isp, buffer->isp_addr);
		buffer->isp_addr = 0;
	}
}

static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	unsigned long addr;

	addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
	if (IS_ERR_VALUE(addr))
		return -EIO;

	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32-byte boundary.\n");
		/* Unmap the address that was just mapped above. */
		ispmmu_vunmap(video->isp, addr);
		return -EINVAL;
	}

	buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
	buffer->isp_addr = addr;
	return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

static const struct isp_video_queue_operations isp_video_queue_ops = {
	.queue_prepare = &isp_video_queue_prepare,
	.buffer_prepare = &isp_video_buffer_prepare,
	.buffer_queue = &isp_video_buffer_queue,
	.buffer_cleanup = &isp_video_buffer_cleanup,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue
 * is empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
	else
		buf->vbuf.sequence = atomic_read(&pipe->frame_number);

	/* Report pipeline errors to userspace on the capture device side. */
	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		buf->state = ISP_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		buf->state = ISP_BUF_STATE_DONE;
	}

	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if the
 * pipeline is in continuous mode, and requests the ISP modules to queue the
 * ACTIVE buffer again if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		omap3isp_video_queue_discard_done(video->queue);

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, buffer.irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);

	/* Fill the bytesperline and sizeimage fields by converting to media
	 * bus format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	vfh->format = *format;

	mutex_unlock(&video->mutex);
	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get crop operation first and fallback to get format if not
	 * implemented.
	 */
	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	crop->c.left = 0;
	crop->c.top = 0;
	crop->c.width = format.format.width;
	crop->c.height = format.format.height;

	return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_querybuf(&vfh->queue, b);
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_qbuf(&vfh->queue, b);
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_dqbuf(&vfh->queue, b,
					  file->f_flags & O_NONBLOCK);
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * with the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffer queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */
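
/*
 * Illustrative summary (not from the original source): from userspace, a
 * typical capture session on one of these video nodes follows the usual V4L2
 * sequence handled by the ioctls below - VIDIOC_S_FMT to configure the format,
 * VIDIOC_REQBUFS to allocate buffers, VIDIOC_QBUF to queue them,
 * VIDIOC_STREAMON to start the pipeline, a VIDIOC_DQBUF/VIDIOC_QBUF loop, and
 * finally VIDIOC_STREAMOFF. The pipeline-specific constraints (matching media
 * bus formats, a valid source entity) are enforced at stream-on time by
 * isp_video_check_format() and isp_video_validate_pipeline().
 */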
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	struct isp_video *far_end;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (video->streaming) {
		mutex_unlock(&video->stream_lock);
		return -EBUSY;
	}

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;
	media_entity_pipeline_start(&video->video.entity, &pipe->pipe);

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto error;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/* Find the ISP video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = isp_video_far_end(video);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL) {
			ret = -EPIPE;
			goto error;
		}

		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);

	/* Validate the pipeline and update its state. */
	ret = isp_video_validate_pipeline(pipe);
	if (ret < 0)
		goto error;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);

	ret = omap3isp_video_queue_streamon(&vfh->queue);
	if (ret < 0)
		goto error;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto error;
		spin_lock_irqsave(&video->queue->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->queue->irqlock, flags);
	}

error:
	if (ret < 0) {
		omap3isp_video_queue_streamoff(&vfh->queue);
		if (video->isp->pdata->set_constraints)
			video->isp->pdata->set_constraints(video->isp, false);
		media_entity_pipeline_stop(&video->video.entity);
		/* The DMA queue must be emptied here, otherwise CCDC
		 * interrupts that will get triggered the next time the CCDC
		 * is powered up will try to access buffers that might have
		 * been freed but are still present in the DMA queue. This can
		 * easily get triggered if the above
		 * omap3isp_pipeline_set_stream() call fails on a system with
		 * a free-running sensor.
		 */
		INIT_LIST_HEAD(&video->dmaqueue);
		video->queue = NULL;
	}

	if (!ret)
		video->streaming = 1;

	mutex_unlock(&video->stream_lock);
	return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Make sure we're not streaming yet. */
	mutex_lock(&vfh->queue.lock);
	streaming = vfh->queue.streaming;
	mutex_unlock(&vfh->queue.lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_queue_streamoff(&vfh->queue);
	video->queue = NULL;
	video->streaming = 0;

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_cropcap = isp_video_cropcap,
	.vidioc_g_crop = isp_video_get_crop,
	.vidioc_s_crop = isp_video_set_crop,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	omap3isp_video_queue_init(&handle->queue, video->type,
				  &isp_video_queue_ops, video->isp->dev,
				  sizeof(struct isp_buffer));

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&handle->queue.lock);
	omap3isp_video_queue_cleanup(&handle->queue);
	mutex_unlock(&handle->queue.lock);

	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video_queue *queue = &vfh->queue;

	return omap3isp_video_queue_poll(queue, file, wait);
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return omap3isp_video_queue_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		printk(KERN_ERR "%s: could not register video device (%d)\n",
		       __func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);
}