/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>

#include <linux/sched.h>
#include <linux/slab.h>

static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh);

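/*
 * v4l2_event_init - allocate and initialize the per-filehandle event state
 *
 * Must be called (typically from the driver's open()) before any other
 * event function is used on this fh. The sequence counter starts at -1 so
 * that the first event queued on this fh carries sequence number 0.
 */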
int v4l2_event_init(struct v4l2_fh *fh)
{
	fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
	if (fh->events == NULL)
		return -ENOMEM;

	init_waitqueue_head(&fh->events->wait);

	INIT_LIST_HEAD(&fh->events->free);
	INIT_LIST_HEAD(&fh->events->available);
	INIT_LIST_HEAD(&fh->events->subscribed);

	fh->events->sequence = -1;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_init);

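/*
 * v4l2_event_alloc - make sure at least @n event slots are preallocated
 *
 * The slots are filled here with GFP_KERNEL allocations so that
 * __v4l2_event_queue_fh() never has to allocate in atomic context. If fewer
 * than @n slots exist, more are added; the pool is never shrunk. Events
 * queued while the free list is empty are dropped.
 */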
int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
{
	struct v4l2_events *events = fh->events;
	unsigned long flags;

	if (!events) {
		WARN_ON(1);
		return -ENOMEM;
	}

	while (events->nallocated < n) {
		struct v4l2_kevent *kev;

		kev = kzalloc(sizeof(*kev), GFP_KERNEL);
		if (kev == NULL)
			return -ENOMEM;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		list_add_tail(&kev->list, &events->free);
		events->nallocated++;
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_alloc);

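/*
 * Free every entry of @list, where each entry is a @type that links into
 * the list through its @member field.
 */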
#define list_kfree(list, type, member)				\
	while (!list_empty(list)) {				\
		type *hi;					\
		hi = list_first_entry(list, type, member);	\
		list_del(&hi->member);				\
		kfree(hi);					\
	}

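/*
 * v4l2_event_free - counterpart of v4l2_event_init: drop all subscriptions,
 * release every allocated event slot and free the event state itself.
 */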
void v4l2_event_free(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;

	if (!events)
		return;

	list_kfree(&events->free, struct v4l2_kevent, list);
	list_kfree(&events->available, struct v4l2_kevent, list);
	v4l2_event_unsubscribe_all(fh);

	kfree(events);
	fh->events = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_event_free);

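/*
 * Move the oldest available event into *@event and return its slot to the
 * free list. Returns -ENOENT if no event is pending. The pending field
 * tells userspace how many more events are still queued after this one.
 */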
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&events->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(events->navailable == 0);

	kev = list_first_entry(&events->available, struct v4l2_kevent, list);
	list_move(&kev->list, &events->free);
	events->navailable--;

	kev->event.pending = events->navailable;
	*event = kev->event;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

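/*
 * v4l2_event_dequeue - dequeue one event, optionally blocking until one
 * arrives. The serialization mutex (fh->vdev->lock), if present, is
 * dropped while sleeping so other file operations are not blocked. The
 * loop retries on -ENOENT because another thread may have consumed the
 * event between the wakeup and the dequeue attempt.
 */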
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	struct v4l2_events *events = fh->events;
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(events->wait,
				events->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &events->subscribed, list) {
		if (sev->type == type && sev->id == id)
			return sev;
	}

	return NULL;
}

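/*
 * Queue one event on a single file handle; caller must hold
 * fh->vdev->fh_lock. The sequence number is bumped even when the event
 * has to be dropped for lack of a free slot, so gaps in the sequence
 * numbers tell userspace that events were lost.
 */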
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	events->sequence++;

	/* Do we have any free events? */
	if (list_empty(&events->free))
		return;

	/* Take one and fill it. */
	kev = list_first_entry(&events->free, struct v4l2_kevent, list);
	kev->event.type = ev->type;
	kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = events->sequence;
	list_move_tail(&kev->list, &events->available);

	events->navailable++;

	wake_up_all(&events->wait);
}

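/*
 * v4l2_event_queue - queue an event to every file handle opened on @vdev.
 * The timestamp is taken once so all recipients see the same value, and
 * the per-fh work runs under the fh_lock spinlock with no allocations.
 *
 * A minimal sketch of driver usage; the private event type and the "sd"
 * device pointer are illustrative, not defined in this file:
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_PRIVATE_START + 1,
 *	};
 *
 *	v4l2_event_queue(sd->vdev, &ev);
 */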
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list) {
		__v4l2_event_queue_fh(fh, ev, &timestamp);
	}

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

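/*
 * v4l2_event_queue_fh - like v4l2_event_queue(), but deliver the event to
 * one specific file handle only, for events that concern a single
 * subscriber rather than every open of the device.
 */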
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

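/*
 * v4l2_event_pending - number of events ready for dequeueing. Drivers
 * typically call this from their poll() handler to decide whether to
 * report POLLPRI readiness.
 */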
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->events->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

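/*
 * v4l2_event_subscribe - implements VIDIOC_SUBSCRIBE_EVENT. Subscribing
 * twice to the same (type, id) pair is a no-op that still returns 0. For
 * V4L2_EVENT_CTRL the control must exist, and the file handle is also
 * registered with the control framework so that control changes generate
 * events. Memory is allocated up front because kmalloc() must not be
 * called while holding the fh_lock spinlock.
 */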
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev, *found_ev;
	struct v4l2_ctrl *ctrl = NULL;
	struct v4l2_ctrl_fh *ctrl_fh = NULL;
	unsigned long flags;

	if (fh->events == NULL) {
		WARN_ON(1);
		return -ENOMEM;
	}

	if (sub->type == V4L2_EVENT_CTRL) {
		ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
		if (ctrl == NULL)
			return -EINVAL;
	}

	sev = kmalloc(sizeof(*sev), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	if (ctrl) {
		ctrl_fh = kzalloc(sizeof(*ctrl_fh), GFP_KERNEL);
		if (!ctrl_fh) {
			kfree(sev);
			return -ENOMEM;
		}
		ctrl_fh->fh = fh;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev) {
		INIT_LIST_HEAD(&sev->list);
		sev->type = sub->type;
		sev->id = sub->id;

		list_add(&sev->list, &events->subscribed);
		sev = NULL;
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* v4l2_ctrl_add_fh uses a mutex, so do this outside the spin lock */
	if (ctrl) {
		if (found_ev)
			kfree(ctrl_fh);
		else
			v4l2_ctrl_add_fh(fh->ctrl_handler, ctrl_fh, sub);
	}

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

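/*
 * Unsubscribe everything, one subscription at a time. The subscription to
 * remove is looked up under the spinlock, but v4l2_event_unsubscribe() is
 * called without it held, since the V4L2_EVENT_CTRL path there takes a
 * mutex.
 */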
static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&events->subscribed)) {
			sev = list_first_entry(&events->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}

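/*
 * v4l2_event_unsubscribe - implements VIDIOC_UNSUBSCRIBE_EVENT, including
 * the special V4L2_EVENT_ALL type that removes every subscription on the
 * file handle.
 */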
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		list_del(&sev->list);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/*
	 * sev is NULL when the event was never subscribed; guard the
	 * dereference and leave the kfree(NULL) below as a no-op.
	 */
	if (sev && sev->type == V4L2_EVENT_CTRL) {
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);

		if (ctrl)
			v4l2_ctrl_del_fh(ctrl, fh);
	}

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);