/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

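/*
 * sev->events is used as a ring buffer: sev->first indexes the oldest
 * queued event and sev->elems is the ring size.  Map the logical
 * position idx onto the underlying array, wrapping around the end.
 */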
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
        idx += sev->first;
        return idx >= sev->elems ? idx - sev->elems : idx;
}

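/*
 * Detach the oldest pending event from the file handle and copy it to
 * *event, updating the owning subscription's ring.  Returns -ENOENT if
 * nothing is pending.  Takes fh->vdev->fh_lock itself.
 */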
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
        struct v4l2_kevent *kev;
        unsigned long flags;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        if (list_empty(&fh->available)) {
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                return -ENOENT;
        }

        WARN_ON(fh->navailable == 0);

        kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
        list_del(&kev->list);
        fh->navailable--;

        kev->event.pending = fh->navailable;
        *event = kev->event;
        kev->sev->first = sev_pos(kev->sev, 1);
        kev->sev->in_use--;

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        return 0;
}

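/*
 * Blocking variant used for VIDIOC_DQEVENT.  Unless @nonblocking is set,
 * sleep interruptibly until an event arrives; a signal makes
 * wait_event_interruptible() return -ERESTARTSYS, which is passed on to
 * the caller.
 */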
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
                       int nonblocking)
{
        int ret;

        if (nonblocking)
                return __v4l2_event_dequeue(fh, event);

        /* Release the vdev lock while waiting */
        if (fh->vdev->lock)
                mutex_unlock(fh->vdev->lock);

        do {
                ret = wait_event_interruptible(fh->wait,
                                               fh->navailable != 0);
                if (ret < 0)
                        break;

                ret = __v4l2_event_dequeue(fh, event);
        } while (ret == -ENOENT);

        if (fh->vdev->lock)
                mutex_lock(fh->vdev->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
                struct v4l2_fh *fh, u32 type, u32 id)
{
        struct v4l2_subscribed_event *sev;

        assert_spin_locked(&fh->vdev->fh_lock);

        list_for_each_entry(sev, &fh->subscribed, list)
                if (sev->type == type && sev->id == id)
                        return sev;

        return NULL;
}

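/*
 * Queue one event on a single file handle.  If the subscription's ring
 * is full, the oldest event is dropped to make room; the optional
 * replace/merge ops let the subscriber fold the dropped payload into
 * what remains so no information is lost.
 */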
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
                const struct timespec *ts)
{
        struct v4l2_subscribed_event *sev;
        struct v4l2_kevent *kev;
        bool copy_payload = true;

        /* Are we subscribed? */
        sev = v4l2_event_subscribed(fh, ev->type, ev->id);
        if (sev == NULL)
                return;

        /*
         * If the event has been added to the fh->subscribed list but its
         * add op has not completed yet, elems will be 0; treat this as
         * not being subscribed.
         */
        if (!sev->elems)
                return;

        /* Increase event sequence number on fh. */
        fh->sequence++;

        /* Do we have any free events? */
        if (sev->in_use == sev->elems) {
                /* no, remove the oldest one */
                kev = sev->events + sev_pos(sev, 0);
                list_del(&kev->list);
                sev->in_use--;
                sev->first = sev_pos(sev, 1);
                fh->navailable--;
                if (sev->elems == 1) {
                        if (sev->ops && sev->ops->replace) {
                                sev->ops->replace(&kev->event, ev);
                                copy_payload = false;
                        }
                } else if (sev->ops && sev->ops->merge) {
                        struct v4l2_kevent *second_oldest =
                                sev->events + sev_pos(sev, 0);
                        sev->ops->merge(&kev->event, &second_oldest->event);
                }
        }

        /* Take one and fill it. */
        kev = sev->events + sev_pos(sev, sev->in_use);
        kev->event.type = ev->type;
        if (copy_payload)
                kev->event.u = ev->u;
        kev->event.id = ev->id;
        kev->event.timestamp = *ts;
        kev->event.sequence = fh->sequence;
        sev->in_use++;
        list_add_tail(&kev->list, &fh->available);

        fh->navailable++;

        wake_up_all(&fh->wait);
}

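/*
 * Queue an event on every file handle open on @vdev; only handles that
 * subscribed to this event type/id actually receive it.  All handles
 * share one timestamp, taken before the per-handle loop.
 */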
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
        struct v4l2_fh *fh;
        unsigned long flags;
        struct timespec timestamp;

        if (vdev == NULL)
                return;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&vdev->fh_lock, flags);

        list_for_each_entry(fh, &vdev->fh_list, list)
                __v4l2_event_queue_fh(fh, ev, &timestamp);

        spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
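
/*
 * Illustrative sketch of how a driver might raise an event (not part of
 * this file; the event type and change flag are from the V4L2 uAPI):
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_event_queue(vdev, &ev);
 */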

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
        unsigned long flags;
        struct timespec timestamp;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        __v4l2_event_queue_fh(fh, ev, &timestamp);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

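/*
 * Number of events ready for dequeueing; drivers typically use this in
 * their poll() handlers to decide whether to report POLLPRI.
 */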
int v4l2_event_pending(struct v4l2_fh *fh)
{
        return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

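/*
 * Subscribe @fh to an event type/id.  @elems sets the ring depth (at
 * least one slot); @ops are optional callbacks.  The allocation happens
 * unlocked, the subscribed list is then checked under fh_lock so a
 * concurrent subscribe to the same event cannot add a duplicate, and
 * sev->elems is only set once the add op has succeeded, which is what
 * __v4l2_event_queue_fh() tests before delivering.
 */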
int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
{
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;

        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;

        if (elems < 1)
                elems = 1;

        sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
        if (!sev)
                return -ENOMEM;
        for (i = 0; i < elems; i++)
                sev->events[i].sev = sev;
        sev->type = sub->type;
        sev->id = sub->id;
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (!found_ev)
                list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (found_ev) {
                kvfree(sev);
                return 0; /* Already listening */
        }

        if (sev->ops && sev->ops->add) {
                int ret = sev->ops->add(sev, elems);
                if (ret) {
                        sev->ops = NULL;
                        v4l2_event_unsubscribe(fh, sub);
                        return ret;
                }
        }

        /* Mark as ready for use */
        sev->elems = elems;

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
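
/*
 * Illustrative sketch of a driver's VIDIOC_SUBSCRIBE_EVENT handler
 * (hypothetical function name; the event types are from the V4L2 uAPI):
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 0, NULL);
 *		case V4L2_EVENT_SOURCE_CHANGE:
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */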

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
        struct v4l2_event_subscription sub;
        struct v4l2_subscribed_event *sev;
        unsigned long flags;

        do {
                sev = NULL;

                spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                if (!list_empty(&fh->subscribed)) {
                        sev = list_first_entry(&fh->subscribed,
                                        struct v4l2_subscribed_event, list);
                        sub.type = sev->type;
                        sub.id = sev->id;
                }
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                if (sev)
                        v4l2_event_unsubscribe(fh, &sub);
        } while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

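/*
 * Drop a single subscription, discarding any of its events still
 * pending on the file handle.  V4L2_EVENT_ALL unsubscribes everything.
 * The del op and kvfree() run after the lock is dropped, once the
 * subscription can no longer be found by the event path.
 */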
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                           const struct v4l2_event_subscription *sub)
{
        struct v4l2_subscribed_event *sev;
        unsigned long flags;
        int i;

        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
                return 0;
        }

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (sev != NULL) {
                /* Remove any pending events for this subscription */
                for (i = 0; i < sev->in_use; i++) {
                        list_del(&sev->events[sev_pos(sev, i)].list);
                        fh->navailable--;
                }
                list_del(&sev->list);
        }

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);

        kvfree(sev);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

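/*
 * Wrapper with the v4l2_subdev_core_ops .unsubscribe_event signature so
 * subdev drivers can point that op straight at the generic
 * implementation.
 */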
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
                                  struct v4l2_event_subscription *sub)
{
        return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

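/*
 * Coalescing helpers for V4L2_EVENT_SOURCE_CHANGE.  The payload is a
 * bitmask of changes, so when the ring overflows, replace keeps the
 * newest event but ORs in the changes of the event being dropped, and
 * merge ORs the dropped changes into the next-oldest event.  Either
 * way, no change bit is lost.
 */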
static void v4l2_event_src_replace(struct v4l2_event *old,
                                const struct v4l2_event *new)
{
        u32 old_changes = old->u.src_change.changes;

        old->u.src_change = new->u.src_change;
        old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
                                struct v4l2_event *new)
{
        new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
        .replace = v4l2_event_src_replace,
        .merge = v4l2_event_src_merge,
};

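/*
 * Subscribe helper for source-change events.  elems is passed as 0 and
 * clamped to 1 by v4l2_event_subscribe(): a single slot suffices
 * because the ops above coalesce events that would otherwise be
 * dropped.
 */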
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
                                const struct v4l2_event_subscription *sub)
{
        if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
                return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
                struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
        return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);