/*
 *  fs/eventpoll.c (Efficient event polling implementation)
 *  Copyright (C) 2001,...,2007	 Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll :
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop "ep->mtx" and use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" makes the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * better scalability.
 */

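/*
 * Illustrative sketch (not part of the original source, kept under
 * "#if 0"): the acquisition order described above, shown in a
 * hypothetical helper that needed all three locks at once. The name
 * ep_lock_hierarchy_example() and its body are made up purely for
 * illustration.
 */
#if 0
static void ep_lock_hierarchy_example(struct eventpoll *ep)
{
	unsigned long flags;

	mutex_lock(&epmutex);			/* 1) global mutex, rare paths only */
	mutex_lock(&ep->mtx);			/* 2) per-epoll mutex, may sleep */
	spin_lock_irqsave(&ep->lock, flags);	/* 3) innermost spinlock, IRQ safe */

	/* ... manipulate ep->rdllist / ep->ovflist here ... */

	spin_unlock_irqrestore(&ep->lock, flags);
	mutex_unlock(&ep->mtx);
	mutex_unlock(&epmutex);
}
#endif
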
#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
 * It is used to keep track of all tasks that are currently inside the wake_up() code
 * to 1) short-circuit the one coming from the same task and same wait queue head
 * (loop) 2) allow a maximum number of epoll descriptors inclusion nesting
 * 3) let through the ones coming from other tasks.
 */
struct wake_task_node {
	struct list_head llink;
	struct task_struct *task;
	wait_queue_head_t *wq;
};

/*
 * This is used to implement the safe poll wake up, avoiding reentry into
 * the poll callback from inside wake_up().
 */
struct poll_safewake {
	struct list_head wake_task_list;
	spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * singly linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protects access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a singly linked list that chains all the "struct epitem" that
	 * became ready while transferring ready events to userspace w/out
	 * holding ->lock.
	 */
	struct epitem *ovflist;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static struct mutex epmutex;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

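/*
 * Condensed, illustrative sketch (not a verbatim copy, kept under
 * "#if 0") of how the epoll_ctl(2) entry point defined later in this
 * file uses the helpers above: the event is copied from userspace only
 * when ep_op_has_event() says the operation carries one, and ep_find()
 * resolves the target item under "mtx" before dispatching. The fd/file
 * lookup and most error handling are omitted here.
 */
#if 0
static long ep_ctl_sketch(struct eventpoll *ep, int op, struct file *tfile,
			  int fd, struct epoll_event __user *event)
{
	struct epoll_event epds;
	struct epitem *epi;
	long error = -EINVAL;

	/* EPOLL_CTL_DEL carries no event, so nothing to copy for it */
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		return -EFAULT;

	mutex_lock(&ep->mtx);
	epi = ep_find(ep, tfile, fd);
	switch (op) {
	case EPOLL_CTL_ADD:
		error = epi ? -EEXIST : ep_insert(ep, &epds, tfile, fd);
		break;
	case EPOLL_CTL_DEL:
		error = epi ? ep_remove(ep, epi) : -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		error = epi ? ep_modify(ep, epi, &epds) : -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

	return error;
}
#endif
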
/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
	INIT_LIST_HEAD(&psw->wake_task_list);
	spin_lock_init(&psw->lock);
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows a
 * hierarchy of epoll file descriptors of no more than
 * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
 * because this one gets called by the poll callback, that in turn is called
 * from inside a wake_up(), that might be called from irq context.
 */
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
	int wake_nests = 0;
	unsigned long flags;
	struct task_struct *this_task = current;
	struct list_head *lsthead = &psw->wake_task_list;
	struct wake_task_node *tncur;
	struct wake_task_node tnode;

	spin_lock_irqsave(&psw->lock, flags);

	/* Try to see if the current task is already inside this wakeup call */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->wq == wq ||
		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&psw->lock, flags);
			return;
		}
	}

	/* Add the current task to the list */
	tnode.task = this_task;
	tnode.wq = wq;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&psw->lock, flags);

	/* Do really wake up now */
	wake_up_nested(wq, 1 + wake_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&psw->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&psw->lock, flags);
}
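
/*
 * Illustrative userspace sketch (not kernel code, hence "#if 0"): the
 * nesting that ep_poll_safewake() guards against arises when one epoll
 * file descriptor is added to another. A wake up on "inner" must then
 * wake "outer" too, which is exactly the wake_up() reentry bounded by
 * EP_MAX_POLLWAKE_NESTS above.
 */
#if 0
#include <sys/epoll.h>

int nested_epoll_example(int monitored_fd)
{
	int inner = epoll_create(1);
	int outer = epoll_create(1);
	struct epoll_event ev;

	ev.events = EPOLLIN;
	ev.data.fd = monitored_fd;
	epoll_ctl(inner, EPOLL_CTL_ADD, monitored_fd, &ev);

	/* The inner epoll fd itself becomes a monitored file of "outer" */
	ev.data.fd = inner;
	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);

	return outer;
}
#endif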

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Since this must be called without holding "ep->lock" the
 * atomic exchange trick will protect us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

			list_del_init(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			kmem_cache_free(pwq_cache, pwq);
		}
	}
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the sequence of the lock acquisition. Here we do "ep->lock" then the
	 * wait queue head lock when unregistering the wait queue. The wakeup
	 * callback will run by holding the wait queue head lock and will call
	 * our callback that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_ep_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_ep_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
		     current, ep, file));

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for these files */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&psw, &ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	unsigned int pollflags = 0;
	unsigned long flags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/* Check our condition */
	spin_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->rdllist))
		pollflags = POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&ep->lock, flags);

	return pollflags;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_ep_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock(&ep->mtx);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}
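
/*
 * For reference, the function above is not called directly by fput();
 * a small inline hook in include/linux/eventpoll.h (reproduced here
 * from memory under "#if 0", so treat it as a sketch rather than the
 * exact header text) short-circuits the common case where the file was
 * never added to an epoll set:
 */
#if 0
static inline void eventpoll_release(struct file *file)
{
	/* Fast path: the vast majority of files are not epoll-monitored */
	if (likely(list_empty(&file->f_ep_links)))
		return;
	eventpoll_release_file(file);
}
#endif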

static int ep_alloc(struct eventpoll **pep)
{
	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return -ENOMEM;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;

	*pep = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
		     current, ep));
	return 0;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
		     current, file, epir));

	return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->ffd.file, epi, ep));

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD is issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (ep_is_linked(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}
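
/*
 * Illustrative userspace sketch (not kernel code, hence "#if 0"): the
 * EPOLLONESHOT behaviour described above, where ep_send_events() strips
 * the event bits after delivery and this callback then ignores the
 * disabled descriptor, means the caller has to re-arm the fd explicitly
 * with EPOLL_CTL_MOD after handling each event:
 */
#if 0
#include <sys/epoll.h>

void rearm_oneshot_fd(int epfd, int fd)
{
	struct epoll_event ev;

	ev.events = EPOLLIN | EPOLLONESHOT;	/* re-enable for one more event */
	ev.data.fd = fd;
	epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
}
#endif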

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		goto error_return;

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);
error_return:
	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	spin_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (revents & event->events) {
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 0;
}
|  | 848 |  | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 849 | static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events, | 
|  | 850 | int maxevents) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 851 | { | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 852 | int eventcnt, error = -EFAULT, pwake = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | unsigned int revents; | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 854 | unsigned long flags; | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 855 | struct epitem *epi, *nepi; | 
|  | 856 | struct list_head txlist; | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 857 |  | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 858 | INIT_LIST_HEAD(&txlist); | 
|  | 859 |  | 
|  | 860 | /* | 
|  | 861 | * We need to lock this because we could be hit by | 
|  | 862 | * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL). | 
|  | 863 | */ | 
|  | 864 | mutex_lock(&ep->mtx); | 
|  | 865 |  | 
|  | 866 | /* | 
|  | 867 | * Steal the ready list, and re-init the original one to the | 
|  | 868 | * empty list. Also, set ep->ovflist to NULL so that events | 
|  | 869 | * happening while looping w/out locks, are not lost. We cannot | 
|  | 870 | * have the poll callback to queue directly on ep->rdllist, | 
|  | 871 | * because we are doing it in the loop below, in a lockless way. | 
|  | 872 | */ | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 873 | spin_lock_irqsave(&ep->lock, flags); | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 874 | list_splice(&ep->rdllist, &txlist); | 
|  | 875 | INIT_LIST_HEAD(&ep->rdllist); | 
|  | 876 | ep->ovflist = NULL; | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 877 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 |  | 
|  | 879 | /* | 
|  | 880 | * We can loop without lock because this is a task private list. | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 881 | * We just splice'd out the ep->rdllist in ep_collect_ready_items(). | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 882 | * Items cannot vanish during the loop because we are holding "mtx". | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 883 | */ | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 884 | for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) { | 
|  | 885 | epi = list_first_entry(&txlist, struct epitem, rdllink); | 
|  | 886 |  | 
|  | 887 | list_del_init(&epi->rdllink); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 888 |  | 
|  | 889 | /* | 
|  | 890 | * Get the ready file event set. We can safely use the file | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 891 | * because we are holding the "mtx" and this will guarantee | 
|  | 892 | * that both the file and the item will not vanish. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | */ | 
|  | 894 | revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 895 | revents &= epi->event.events; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 896 |  | 
|  | 897 | /* | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 898 | * Is the event mask intersect the caller-requested one, | 
|  | 899 | * deliver the event to userspace. Again, we are holding | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 900 | * "mtx", so no operations coming from userspace can change | 
|  | 901 | * the item. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 902 | */ | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 903 | if (revents) { | 
|  | 904 | if (__put_user(revents, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 905 | &events[eventcnt].events) || | 
|  | 906 | __put_user(epi->event.data, | 
|  | 907 | &events[eventcnt].data)) | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 908 | goto errxit; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | if (epi->event.events & EPOLLONESHOT) | 
|  | 910 | epi->event.events &= EP_PRIVATE_BITS; | 
|  | 911 | eventcnt++; | 
|  | 912 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | /* | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 914 | * At this point, noone can insert into ep->rdllist besides | 
|  | 915 | * us. The epoll_ctl() callers are locked out by us holding | 
|  | 916 | * "mtx" and the poll callback will queue them in ep->ovflist. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 917 | */ | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 918 | if (!(epi->event.events & EPOLLET) && | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 919 | (revents & epi->event.events)) | 
|  | 920 | list_add_tail(&epi->rdllink, &ep->rdllist); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 | } | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 922 | error = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 |  | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 924 | errxit: | 
|  | 925 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 926 | spin_lock_irqsave(&ep->lock, flags); | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 927 | /* | 
|  | 928 | * During the time we spent in the loop above, some other events | 
|  | 929 | * might have been queued by the poll callback. We re-insert them | 
| Davide Libenzi | f337b9c | 2008-10-15 22:01:56 -0700 | [diff] [blame] | 930 | * inside the main ready-list here. | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 931 | */ | 
|  | 932 | for (nepi = ep->ovflist; (epi = nepi) != NULL; | 
| Davide Libenzi | 9ce209d | 2008-10-17 16:17:40 -0700 | [diff] [blame] | 933 | nepi = epi->next, epi->next = EP_UNACTIVE_PTR) { | 
|  | 934 | /* | 
|  | 935 | * If the above loop quit with errors, the epoll item might still | 
|  | 936 | * be linked to "txlist", and the list_splice() done below will | 
|  | 937 | * take care of those cases. | 
|  | 938 | */ | 
|  | 939 | if (!ep_is_linked(&epi->rdllink)) | 
|  | 940 | list_add_tail(&epi->rdllink, &ep->rdllist); | 
|  | 941 | } | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 942 | /* | 
|  | 943 | * We need to set ep->ovflist back to EP_UNACTIVE_PTR, so that after | 
|  | 944 | * releasing the lock, events will be queued in the normal way inside | 
|  | 945 | * ep->rdllist. | 
|  | 946 | */ | 
|  | 947 | ep->ovflist = EP_UNACTIVE_PTR; | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 948 |  | 
|  | 949 | /* | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 950 | * In case of error in the event-send loop, or in case the number of | 
|  | 951 | * ready events exceeds the userspace limit, we need to splice the | 
|  | 952 | * "txlist" back inside ep->rdllist. | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 953 | */ | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 954 | list_splice(&txlist, &ep->rdllist); | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 955 |  | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 956 | if (!list_empty(&ep->rdllist)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | /* | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 958 | * Wake up (if active) both the eventpoll wait list and the ->poll() | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 959 | * wait list (delayed after we release the lock). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | */ | 
|  | 961 | if (waitqueue_active(&ep->wq)) | 
| Matthew Wilcox | 4a6e9e2 | 2007-08-30 16:10:22 -0400 | [diff] [blame] | 962 | wake_up_locked(&ep->wq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | if (waitqueue_active(&ep->poll_wait)) | 
|  | 964 | pwake++; | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 965 | } | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 966 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 967 |  | 
|  | 968 | mutex_unlock(&ep->mtx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 |  | 
|  | 970 | /* We have to call this outside the lock */ | 
|  | 971 | if (pwake) | 
|  | 972 | ep_poll_safewake(&psw, &ep->poll_wait); | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 973 |  | 
|  | 974 | return eventcnt == 0 ? error: eventcnt; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | } | 
|  | 976 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | 
|  | 978 | int maxevents, long timeout) | 
|  | 979 | { | 
|  | 980 | int res, eavail; | 
|  | 981 | unsigned long flags; | 
|  | 982 | long jtimeout; | 
|  | 983 | wait_queue_t wait; | 
|  | 984 |  | 
|  | 985 | /* | 
|  | 986 | * Calculate the timeout by checking for the "infinite" value ( -1 ) | 
|  | 987 | * and the overflow condition. The passed timeout is in milliseconds, | 
|  | 988 | * which is why we compute (t * HZ) / 1000. | 
|  | 989 | */ | 
| Davide Libenzi | e3306dd | 2005-09-27 21:45:33 -0700 | [diff] [blame] | 990 | jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ? | 
|  | 991 | MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; | 
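	/*
	 * For illustration, assuming HZ = 250: a 3 ms timeout becomes
	 * (3 * 250 + 999) / 1000 = 1 jiffy, whereas a plain (3 * 250) / 1000
	 * would truncate to 0 and expire immediately. The "+ 999" therefore
	 * rounds every non-zero timeout up to at least one tick.
	 */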
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 |  | 
|  | 993 | retry: | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 994 | spin_lock_irqsave(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 |  | 
|  | 996 | res = 0; | 
|  | 997 | if (list_empty(&ep->rdllist)) { | 
|  | 998 | /* | 
|  | 999 | * We don't have any available event to return to the caller. | 
|  | 1000 | * We need to sleep here, and we will be woken up by | 
|  | 1001 | * ep_poll_callback() when events become available. | 
|  | 1002 | */ | 
|  | 1003 | init_waitqueue_entry(&wait, current); | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1004 | wait.flags |= WQ_FLAG_EXCLUSIVE; | 
| Davide Libenzi | 3419b23 | 2006-06-25 05:48:14 -0700 | [diff] [blame] | 1005 | __add_wait_queue(&ep->wq, &wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 |  | 
|  | 1007 | for (;;) { | 
|  | 1008 | /* | 
|  | 1009 | * We don't want to sleep if the ep_poll_callback() sends us | 
|  | 1010 | * a wakeup in between. That's why we set the task state | 
|  | 1011 | * to TASK_INTERRUPTIBLE before doing the checks. | 
|  | 1012 | */ | 
|  | 1013 | set_current_state(TASK_INTERRUPTIBLE); | 
|  | 1014 | if (!list_empty(&ep->rdllist) || !jtimeout) | 
|  | 1015 | break; | 
|  | 1016 | if (signal_pending(current)) { | 
|  | 1017 | res = -EINTR; | 
|  | 1018 | break; | 
|  | 1019 | } | 
|  | 1020 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1021 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1022 | jtimeout = schedule_timeout(jtimeout); | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1023 | spin_lock_irqsave(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | } | 
| Davide Libenzi | 3419b23 | 2006-06-25 05:48:14 -0700 | [diff] [blame] | 1025 | __remove_wait_queue(&ep->wq, &wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1026 |  | 
|  | 1027 | set_current_state(TASK_RUNNING); | 
|  | 1028 | } | 
|  | 1029 |  | 
|  | 1030 | /* Is it worth trying to dig for events? */ | 
|  | 1031 | eavail = !list_empty(&ep->rdllist); | 
|  | 1032 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1033 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 |  | 
|  | 1035 | /* | 
|  | 1036 | * Try to transfer events to user space. In case we get 0 events and | 
|  | 1037 | * there's still timeout left over, we go and try again in search of | 
|  | 1038 | * more luck. | 
|  | 1039 | */ | 
|  | 1040 | if (!res && eavail && | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1041 | !(res = ep_send_events(ep, events, maxevents)) && jtimeout) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1042 | goto retry; | 
|  | 1043 |  | 
|  | 1044 | return res; | 
|  | 1045 | } | 
|  | 1046 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1047 | /* | 
| Andrew Morton | 523723b | 2008-08-12 15:09:01 -0700 | [diff] [blame] | 1048 | * Open an eventpoll file descriptor. | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1049 | */ | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1050 | asmlinkage long sys_epoll_create1(int flags) | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1051 | { | 
|  | 1052 | int error, fd = -1; | 
|  | 1053 | struct eventpoll *ep; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1054 |  | 
| Ulrich Drepper | e38b36f | 2008-07-23 21:29:42 -0700 | [diff] [blame] | 1055 | /* Check the EPOLL_* constant for consistency.  */ | 
|  | 1056 | BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); | 
|  | 1057 |  | 
| Ulrich Drepper | a0998b5 | 2008-07-23 21:29:27 -0700 | [diff] [blame] | 1058 | if (flags & ~EPOLL_CLOEXEC) | 
|  | 1059 | return -EINVAL; | 
|  | 1060 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1061 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1062 | current, flags)); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1063 |  | 
|  | 1064 | /* | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1065 | * Create the internal data structure ( "struct eventpoll" ). | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1066 | */ | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1067 | error = ep_alloc(&ep); | 
|  | 1068 | if (error < 0) { | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1069 | fd = error; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1070 | goto error_return; | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1071 | } | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1072 |  | 
|  | 1073 | /* | 
|  | 1074 | * Creates all the items needed to set up an eventpoll file. That is, | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1075 | * a file structure and a free file descriptor. | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1076 | */ | 
| Ulrich Drepper | a0998b5 | 2008-07-23 21:29:27 -0700 | [diff] [blame] | 1077 | fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, | 
|  | 1078 | flags & O_CLOEXEC); | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1079 | if (fd < 0) | 
|  | 1080 | ep_free(ep); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1081 |  | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1082 | error_return: | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1083 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1084 | current, flags, fd)); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1085 |  | 
|  | 1086 | return fd; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1087 | } | 
|  | 1088 |  | 
| Ulrich Drepper | a0998b5 | 2008-07-23 21:29:27 -0700 | [diff] [blame] | 1089 | asmlinkage long sys_epoll_create(int size) | 
|  | 1090 | { | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1091 | if (size < 0) | 
|  | 1092 | return -EINVAL; | 
|  | 1093 |  | 
|  | 1094 | return sys_epoll_create1(0); | 
| Ulrich Drepper | a0998b5 | 2008-07-23 21:29:27 -0700 | [diff] [blame] | 1095 | } | 
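/*
 * A minimal userspace sketch of the creation paths above (illustrative only,
 * not part of this file), assuming the usual <sys/epoll.h> wrappers:
 *
 *	#include <sys/epoll.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int epfd = epoll_create1(EPOLL_CLOEXEC);  // lands in sys_epoll_create1()
 *
 *		if (epfd < 0) {
 *			perror("epoll_create1");
 *			return 1;
 *		}
 *		// The legacy epoll_create(size) only rejects a negative size
 *		// hint and otherwise behaves like epoll_create1(0).
 *		return 0;
 *	}
 */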
|  | 1096 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1097 | /* | 
|  | 1098 | * The following function implements the controller interface for | 
|  | 1099 | * the eventpoll file that enables the insertion/removal/change of | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 1100 | * file descriptors inside the interest set. | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1101 | */ | 
|  | 1102 | asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, | 
|  | 1103 | struct epoll_event __user *event) | 
|  | 1104 | { | 
|  | 1105 | int error; | 
|  | 1106 | struct file *file, *tfile; | 
|  | 1107 | struct eventpoll *ep; | 
|  | 1108 | struct epitem *epi; | 
|  | 1109 | struct epoll_event epds; | 
|  | 1110 |  | 
|  | 1111 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n", | 
|  | 1112 | current, epfd, op, fd, event)); | 
|  | 1113 |  | 
|  | 1114 | error = -EFAULT; | 
|  | 1115 | if (ep_op_has_event(op) && | 
|  | 1116 | copy_from_user(&epds, event, sizeof(struct epoll_event))) | 
|  | 1117 | goto error_return; | 
|  | 1118 |  | 
|  | 1119 | /* Get the "struct file *" for the eventpoll file */ | 
|  | 1120 | error = -EBADF; | 
|  | 1121 | file = fget(epfd); | 
|  | 1122 | if (!file) | 
|  | 1123 | goto error_return; | 
|  | 1124 |  | 
|  | 1125 | /* Get the "struct file *" for the target file */ | 
|  | 1126 | tfile = fget(fd); | 
|  | 1127 | if (!tfile) | 
|  | 1128 | goto error_fput; | 
|  | 1129 |  | 
|  | 1130 | /* The target file descriptor must support poll */ | 
|  | 1131 | error = -EPERM; | 
|  | 1132 | if (!tfile->f_op || !tfile->f_op->poll) | 
|  | 1133 | goto error_tgt_fput; | 
|  | 1134 |  | 
|  | 1135 | /* | 
|  | 1136 | * We have to check that the file structure underneath the file descriptor | 
|  | 1137 | * the user passed to us _is_ an eventpoll file. Also, we do not permit | 
|  | 1138 | * adding an epoll file descriptor inside itself. | 
|  | 1139 | */ | 
|  | 1140 | error = -EINVAL; | 
|  | 1141 | if (file == tfile || !is_file_epoll(file)) | 
|  | 1142 | goto error_tgt_fput; | 
|  | 1143 |  | 
|  | 1144 | /* | 
|  | 1145 | * At this point it is safe to assume that the "private_data" contains | 
|  | 1146 | * our own data structure. | 
|  | 1147 | */ | 
|  | 1148 | ep = file->private_data; | 
|  | 1149 |  | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1150 | mutex_lock(&ep->mtx); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1151 |  | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 1152 | /* | 
|  | 1153 | * Try to look up the file inside our RB tree. Since we grabbed "mtx" | 
|  | 1154 | * above, we can be sure to be able to use the item looked up by | 
|  | 1155 | * ep_find() until we release the mutex. | 
|  | 1156 | */ | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1157 | epi = ep_find(ep, tfile, fd); | 
|  | 1158 |  | 
|  | 1159 | error = -EINVAL; | 
|  | 1160 | switch (op) { | 
|  | 1161 | case EPOLL_CTL_ADD: | 
|  | 1162 | if (!epi) { | 
|  | 1163 | epds.events |= POLLERR | POLLHUP; | 
|  | 1164 |  | 
|  | 1165 | error = ep_insert(ep, &epds, tfile, fd); | 
|  | 1166 | } else | 
|  | 1167 | error = -EEXIST; | 
|  | 1168 | break; | 
|  | 1169 | case EPOLL_CTL_DEL: | 
|  | 1170 | if (epi) | 
|  | 1171 | error = ep_remove(ep, epi); | 
|  | 1172 | else | 
|  | 1173 | error = -ENOENT; | 
|  | 1174 | break; | 
|  | 1175 | case EPOLL_CTL_MOD: | 
|  | 1176 | if (epi) { | 
|  | 1177 | epds.events |= POLLERR | POLLHUP; | 
|  | 1178 | error = ep_modify(ep, epi, &epds); | 
|  | 1179 | } else | 
|  | 1180 | error = -ENOENT; | 
|  | 1181 | break; | 
|  | 1182 | } | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1183 | mutex_unlock(&ep->mtx); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1184 |  | 
|  | 1185 | error_tgt_fput: | 
|  | 1186 | fput(tfile); | 
|  | 1187 | error_fput: | 
|  | 1188 | fput(file); | 
|  | 1189 | error_return: | 
|  | 1190 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n", | 
|  | 1191 | current, epfd, op, fd, event, error)); | 
|  | 1192 |  | 
|  | 1193 | return error; | 
|  | 1194 | } | 
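/*
 * A small userspace sketch of driving the controller above (illustrative
 * only, not part of this file); watch_fd() is a hypothetical helper name:
 *
 *	#include <sys/epoll.h>
 *
 *	static int watch_fd(int epfd, int fd)
 *	{
 *		struct epoll_event ev = {
 *			.events = EPOLLIN,	// POLLERR/POLLHUP are always added by the kernel (see above)
 *			.data.fd = fd,
 *		};
 *
 *		return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *	}
 */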
|  | 1195 |  | 
|  | 1196 | /* | 
|  | 1197 | * Implement the event wait interface for the eventpoll file. It is the kernel | 
|  | 1198 | * part of the user space epoll_wait(2). | 
|  | 1199 | */ | 
|  | 1200 | asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, | 
|  | 1201 | int maxevents, int timeout) | 
|  | 1202 | { | 
|  | 1203 | int error; | 
|  | 1204 | struct file *file; | 
|  | 1205 | struct eventpoll *ep; | 
|  | 1206 |  | 
|  | 1207 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n", | 
|  | 1208 | current, epfd, events, maxevents, timeout)); | 
|  | 1209 |  | 
|  | 1210 | /* The maximum number of events must be greater than zero */ | 
|  | 1211 | if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) | 
|  | 1212 | return -EINVAL; | 
|  | 1213 |  | 
|  | 1214 | /* Verify that the area passed by the user is writeable */ | 
|  | 1215 | if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { | 
|  | 1216 | error = -EFAULT; | 
|  | 1217 | goto error_return; | 
|  | 1218 | } | 
|  | 1219 |  | 
|  | 1220 | /* Get the "struct file *" for the eventpoll file */ | 
|  | 1221 | error = -EBADF; | 
|  | 1222 | file = fget(epfd); | 
|  | 1223 | if (!file) | 
|  | 1224 | goto error_return; | 
|  | 1225 |  | 
|  | 1226 | /* | 
|  | 1227 | * We have to check that the file structure underneath the fd | 
|  | 1228 | * the user passed to us _is_ an eventpoll file. | 
|  | 1229 | */ | 
|  | 1230 | error = -EINVAL; | 
|  | 1231 | if (!is_file_epoll(file)) | 
|  | 1232 | goto error_fput; | 
|  | 1233 |  | 
|  | 1234 | /* | 
|  | 1235 | * At this point it is safe to assume that the "private_data" contains | 
|  | 1236 | * our own data structure. | 
|  | 1237 | */ | 
|  | 1238 | ep = file->private_data; | 
|  | 1239 |  | 
|  | 1240 | /* Time to fish for events ... */ | 
|  | 1241 | error = ep_poll(ep, events, maxevents, timeout); | 
|  | 1242 |  | 
|  | 1243 | error_fput: | 
|  | 1244 | fput(file); | 
|  | 1245 | error_return: | 
|  | 1246 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n", | 
|  | 1247 | current, epfd, events, maxevents, timeout, error)); | 
|  | 1248 |  | 
|  | 1249 | return error; | 
|  | 1250 | } | 
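/*
 * A userspace sketch of the wait side (illustrative only, not part of this
 * file); the helper name and the batch size of 64 are arbitrary choices:
 *
 *	#include <errno.h>
 *	#include <sys/epoll.h>
 *
 *	static void event_loop(int epfd)
 *	{
 *		struct epoll_event evs[64];
 *		int i, n;
 *
 *		for (;;) {
 *			n = epoll_wait(epfd, evs, 64, -1);	// -1: no timeout
 *			if (n < 0) {
 *				if (errno == EINTR)
 *					continue;
 *				break;
 *			}
 *			for (i = 0; i < n; i++) {
 *				// handle evs[i].data.fd / evs[i].events here
 *			}
 *		}
 *	}
 */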
|  | 1251 |  | 
| Roland McGrath | f3de272 | 2008-04-30 00:53:09 -0700 | [diff] [blame] | 1252 | #ifdef HAVE_SET_RESTORE_SIGMASK | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1253 |  | 
|  | 1254 | /* | 
|  | 1255 | * Implement the event wait interface for the eventpoll file. It is the kernel | 
|  | 1256 | * part of the user space epoll_pwait(2). | 
|  | 1257 | */ | 
|  | 1258 | asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, | 
|  | 1259 | int maxevents, int timeout, const sigset_t __user *sigmask, | 
|  | 1260 | size_t sigsetsize) | 
|  | 1261 | { | 
|  | 1262 | int error; | 
|  | 1263 | sigset_t ksigmask, sigsaved; | 
|  | 1264 |  | 
|  | 1265 | /* | 
|  | 1266 | * If the caller wants a certain signal mask to be set during the wait, | 
|  | 1267 | * we apply it here. | 
|  | 1268 | */ | 
|  | 1269 | if (sigmask) { | 
|  | 1270 | if (sigsetsize != sizeof(sigset_t)) | 
|  | 1271 | return -EINVAL; | 
|  | 1272 | if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) | 
|  | 1273 | return -EFAULT; | 
|  | 1274 | sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | 
|  | 1275 | sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); | 
|  | 1276 | } | 
|  | 1277 |  | 
|  | 1278 | error = sys_epoll_wait(epfd, events, maxevents, timeout); | 
|  | 1279 |  | 
|  | 1280 | /* | 
|  | 1281 | * If we changed the signal mask, we need to restore the original one. | 
|  | 1282 | * In case we've got a signal while waiting, we do not restore the | 
|  | 1283 | * signal mask yet, and we allow do_signal() to deliver the signal on | 
|  | 1284 | * the way back to userspace, before the signal mask is restored. | 
|  | 1285 | */ | 
|  | 1286 | if (sigmask) { | 
|  | 1287 | if (error == -EINTR) { | 
|  | 1288 | memcpy(&current->saved_sigmask, &sigsaved, | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1289 | sizeof(sigsaved)); | 
| Roland McGrath | 4e4c22c | 2008-04-30 00:53:06 -0700 | [diff] [blame] | 1290 | set_restore_sigmask(); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1291 | } else | 
|  | 1292 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 
|  | 1293 | } | 
|  | 1294 |  | 
|  | 1295 | return error; | 
|  | 1296 | } | 
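/*
 * A userspace sketch of the pwait variant (illustrative only, not part of
 * this file); wait_masked() is a hypothetical helper. The mask passed to
 * epoll_pwait() replaces the caller's signal mask for the duration of the
 * wait only, which is what the saved_sigmask handling above implements:
 *
 *	#include <signal.h>
 *	#include <sys/epoll.h>
 *
 *	static int wait_masked(int epfd, struct epoll_event *evs, int max)
 *	{
 *		sigset_t mask;
 *
 *		sigfillset(&mask);
 *		sigdelset(&mask, SIGTERM);	// effectively only SIGTERM may interrupt the wait
 *		return epoll_pwait(epfd, evs, max, -1, &mask);
 *	}
 */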
|  | 1297 |  | 
| Roland McGrath | f3de272 | 2008-04-30 00:53:09 -0700 | [diff] [blame] | 1298 | #endif /* HAVE_SET_RESTORE_SIGMASK */ | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1299 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | static int __init eventpoll_init(void) | 
|  | 1301 | { | 
| Arjan van de Ven | 144efe3 | 2006-03-23 03:00:32 -0800 | [diff] [blame] | 1302 | mutex_init(&epmutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 |  | 
|  | 1304 | /* Initialize the structure used to perform safe poll wait head wake ups */ | 
|  | 1305 | ep_poll_safewake_init(&psw); | 
|  | 1306 |  | 
|  | 1307 | /* Allocates slab cache used to allocate "struct epitem" items */ | 
|  | 1308 | epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), | 
|  | 1309 | 0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC, | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1310 | NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 |  | 
|  | 1312 | /* Allocates slab cache used to allocate "struct eppoll_entry" */ | 
|  | 1313 | pwq_cache = kmem_cache_create("eventpoll_pwq", | 
|  | 1314 | sizeof(struct eppoll_entry), 0, | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1315 | EPI_SLAB_DEBUG|SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | } | 
| Davide Libenzi | cea6924 | 2007-05-10 22:23:22 -0700 | [diff] [blame] | 1319 | fs_initcall(eventpoll_init); |