/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
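/* aio_nr and aio_max_nr are typically exposed to userspace as the fs.aio-nr
 * and fs.aio-max-nr sysctls; the sysctl table registration lives outside
 * this file. */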

static kmem_cache_t	*kiocb_cachep;
static kmem_cache_t	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(void *);
static DECLARE_WORK(fput_work, aio_fput_routine, NULL);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(void *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	aio_wq = create_workqueue("aio");

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		printk("mmap err: %ld\n", -info->mmap_base);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

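	/* The base address of the mapped ring doubles as the user-visible
	 * aio_context_t handle that sys_io_setup() copies back to userspace. */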
	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
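
/*
 * Worked example of the index math above (a sketch only; the numbers assume
 * a typical 4 KiB page with the 32-byte struct aio_ring header and 32-byte
 * struct io_event): AIO_EVENTS_PER_PAGE = 128, AIO_EVENTS_FIRST_PAGE = 127,
 * AIO_EVENTS_OFFSET = 1.  Event 126 then maps to pos 127, the last slot of
 * ring page 0, while event 127 maps to pos 128, i.e. slot 0 of ring page 1.
 */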

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	memset(ctx, 0, sizeof(*ctx));
	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr)
		ctx->max_reqs = 0;
	else
		aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);
	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list.  kludge.  FIXME */
	write_lock(&mm->ioctx_list_lock);
	ctx->next = mm->ioctx_list;
	mm->ioctx_list = ctx;
	write_unlock(&mm->ioctx_list_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
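		/* Take an extra reference so the iocb cannot be freed while
		 * ctx_lock is dropped around the cancel callback below. */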
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	if (!ctx->reqs_active)
		return;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void fastcall exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx = mm->ioctx_list;
	mm->ioctx_list = NULL;
	while (ctx) {
		struct kioctx *next = ctx->next;
		ctx->next = NULL;
		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * this is overkill, but ensures we don't leave
		 * the ctx on the aio_wq
		 */
		flush_workqueue(aio_wq);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
		ctx = next;
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
void fastcall __put_ioctx(struct kioctx *ctx)
{
	unsigned nr_events = ctx->max_reqs;

	if (unlikely(ctx->reqs_active))
		BUG();

	cancel_delayed_work(&ctx->wq);
	flush_workqueue(aio_wq);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		get_ioctx(ctx);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_dtor)
		req->ki_dtor(req);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(void *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput */
		__fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
		req, atomic_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	if (unlikely(req->ki_users < 0))
		BUG();
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/* Must be done under the lock to serialise against cancellation.
	 * Call this aio_fput as it duplicates fput via the fput_work.
	 */
	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else
		really_put_req(ctx, req);
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int fastcall aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	if (ret)
		put_ioctx(ctx);
	return ret;
}

/*	Lookup an ioctx id.  ioctx_list is lockless for reads.
 *	FIXME: this is O(n) and is only suitable for development.
 */
struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct kioctx *ioctx;
	struct mm_struct *mm;

	mm = current->mm;
	read_lock(&mm->ioctx_list_lock);
	for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
		if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
			get_ioctx(ioctx);
			break;
		}
	read_unlock(&mm->ioctx_list_lock);

	return ioctx;
}

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	Called by the retry thread to execute retries within the
 *	iocb issuer's mm context, so that copy_from/to_user
 *	operations work seamlessly for aio.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->flags |= PF_BORROWED_MM;
	active_mm = tsk->active_mm;
	atomic_inc(&mm->mm_count);
	tsk->mm = mm;
	tsk->active_mm = mm;
	/*
	 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
	 * it won't work. Update it accordingly if you change it here
	 */
	activate_mm(active_mm, mm);
	task_unlock(tsk);

	mmdrop(active_mm);
}

/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 *
 * Comments: Called with ctx->ctx_lock held. This nests
 * task_lock inside ctx_lock.
 */
static void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->flags &= ~PF_BORROWED_MM;
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (iocb->ki_retried++ > 1024*1024) {
		printk("Maximal retry count.  Bytes done %Zd\n",
			iocb->ki_nbytes - iocb->ki_left);
		return -EAGAIN;
	}

	if (!(iocb->ki_retried & 0xff)) {
		pr_debug("%ld retry: %d of %d\n", iocb->ki_retried,
			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
	}

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context. By setting this thread's io_wait context
	 * to point to the wait queue entry inside the currently
	 * running iocb for the duration of the retry, we ensure
	 * that async notification wakeups are queued by the
	 * operation instead of blocking waits, and when notified,
	 * cause the iocb to be kicked for continuation (through
	 * the aio_wake_function callback).
	 */
	BUG_ON(current->io_wait != NULL);
	current->io_wait = &iocb->ki_wait;
	ret = retry(iocb);
	current->io_wait = NULL;

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		BUG_ON(!list_empty(&iocb->ki_wait.task_list));
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;       /* grab extra reference */
		aio_run_iocb(iocb);
		if (__aio_put_req(ctx, iocb))  /* drop extra ref */
			put_ioctx(ctx);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}


/*
 * aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
	int requeue;

	spin_lock_irq(&ctx->ctx_lock);

	requeue = __aio_run_iocbs(ctx);
	spin_unlock_irq(&ctx->ctx_lock);
	if (requeue)
		aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 * 	Work queue handler triggered to process pending
 * 	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(void *data)
{
	struct kioctx *ctx = data;
	mm_segment_t oldfs = get_fs();
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	unuse_mm(ctx->mm);
	spin_unlock_irq(&ctx->ctx_lock);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_work(aio_wq, &ctx->wq);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	/* We're supposed to be the only path putting the iocb back on the run
	 * list.  If we find that the iocb is *back* on a wait queue already
	 * then retry has happened before we could queue the iocb.  This also
	 * means that the retry could have completed and freed our iocb, no
	 * good. */
	BUG_ON((!list_empty(&iocb->ki_wait.task_list)));

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *      Called typically from a wait queue callback context
 *      (aio_wake_function) to trigger a retry of the iocb.
 *      The retry is usually executed by aio workqueue
 *      threads (See aio_kick_handler).
 */
void fastcall kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;
	int		ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	pr_debug("%ld retries: %d of %d\n", iocb->ki_retried,
		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	if (ret)
		put_ioctx(ctx);

	return ret;
}

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	init_timer(&to->timer);
	to->timer.data = (unsigned long)to;
	to->timer.function = timeout_func;
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			ret = 0;
			if (to.timed_out)	/* Only check after read evt */
				break;
			schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event ++;
		i ++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	struct kioctx **tmp;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	write_lock(&mm->ioctx_list_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
	     tmp = &(*tmp)->next)
		;
	if (*tmp)
		*tmp = ioctx->next;
	write_unlock(&mm->ioctx_list_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EFAULT if the context pointed to
 *	is invalid.
 */
asmlinkage long sys_io_destroy(aio_context_t ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}
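
/*
 * Illustrative userspace sketch of the two syscalls above (an example only,
 * using the raw syscall numbers rather than a library wrapper):
 *
 *	aio_context_t ctx = 0;			   (must start out zeroed)
 *	syscall(__NR_io_setup, 128, &ctx);	   (check for < 0 and errno)
 *	... submit and reap i/o against ctx ...
 *	syscall(__NR_io_destroy, ctx);
 */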

/*
 * aio_p{read,write} are the default ki_retry methods for
 * IO_CMD_P{READ,WRITE}.  They maintain kiocb retry state around potentially
 * multiple calls to f_op->aio_read().  They loop around partial progress
 * instead of returning -EIOCBRETRY because they don't have the means to call
 * kick_iocb().
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | */ | 
|  | 1310 | static ssize_t aio_pread(struct kiocb *iocb) | 
|  | 1311 | { | 
|  | 1312 | struct file *file = iocb->ki_filp; | 
|  | 1313 | struct address_space *mapping = file->f_mapping; | 
|  | 1314 | struct inode *inode = mapping->host; | 
|  | 1315 | ssize_t ret = 0; | 
|  | 1316 |  | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1317 | do { | 
|  | 1318 | ret = file->f_op->aio_read(iocb, iocb->ki_buf, | 
|  | 1319 | iocb->ki_left, iocb->ki_pos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 | /* | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1321 | * Can't just depend on iocb->ki_left to determine | 
|  | 1322 | * whether we are done. This may have been a short read. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | */ | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1324 | if (ret > 0) { | 
|  | 1325 | iocb->ki_buf += ret; | 
|  | 1326 | iocb->ki_left -= ret; | 
|  | 1327 | } | 
|  | 1328 |  | 
|  | 1329 | /* | 
|  | 1330 | * For pipes and sockets we return once we have some data; for | 
|  | 1331 | * regular files we retry until we complete the entire read or | 
|  | 1332 | * find that we can't read any more data (e.g. short reads). | 
|  | 1333 | */ | 
| Zach Brown | 353fb07 | 2005-09-30 11:58:56 -0700 | [diff] [blame] | 1334 | } while (ret > 0 && iocb->ki_left > 0 && | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1335 | !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 |  | 
|  | 1337 | /* This means we must have transferred all that we could */ | 
|  | 1338 | /* No need to retry anymore */ | 
|  | 1339 | if ((ret == 0) || (iocb->ki_left == 0)) | 
|  | 1340 | ret = iocb->ki_nbytes - iocb->ki_left; | 
|  | 1341 |  | 
|  | 1342 | return ret; | 
|  | 1343 | } | 
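The loop above is the asynchronous counterpart of the familiar "keep going after a short read" pattern. As a rough synchronous analogue, for illustration only (it uses plain read(2), nothing from this file):

	#include <unistd.h>
	#include <errno.h>
	#include <sys/types.h>

	/* Read until the buffer is full, EOF, or an error: a short read alone
	 * doesn't mean we are done, just as a short ->aio_read() doesn't above. */
	ssize_t read_full(int fd, char *buf, size_t left)
	{
		size_t done = 0;

		while (left > 0) {
			ssize_t n = read(fd, buf + done, left);

			if (n < 0) {
				if (errno == EINTR)
					continue;
				return done ? (ssize_t)done : -1;
			}
			if (n == 0)	/* EOF: no more progress possible */
				break;
			done += n;
			left -= n;
		}
		return done;
	}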
|  | 1344 |  | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1345 | /* see aio_pread() */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1346 | static ssize_t aio_pwrite(struct kiocb *iocb) | 
|  | 1347 | { | 
|  | 1348 | struct file *file = iocb->ki_filp; | 
|  | 1349 | ssize_t ret = 0; | 
|  | 1350 |  | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1351 | do { | 
|  | 1352 | ret = file->f_op->aio_write(iocb, iocb->ki_buf, | 
|  | 1353 | iocb->ki_left, iocb->ki_pos); | 
|  | 1354 | if (ret > 0) { | 
|  | 1355 | iocb->ki_buf += ret; | 
|  | 1356 | iocb->ki_left -= ret; | 
|  | 1357 | } | 
| Zach Brown | 353fb07 | 2005-09-30 11:58:56 -0700 | [diff] [blame] | 1358 | } while (ret > 0 && iocb->ki_left > 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | if ((ret == 0) || (iocb->ki_left == 0)) | 
|  | 1361 | ret = iocb->ki_nbytes - iocb->ki_left; | 
|  | 1362 |  | 
|  | 1363 | return ret; | 
|  | 1364 | } | 
|  | 1365 |  | 
|  | 1366 | static ssize_t aio_fdsync(struct kiocb *iocb) | 
|  | 1367 | { | 
|  | 1368 | struct file *file = iocb->ki_filp; | 
|  | 1369 | ssize_t ret = -EINVAL; | 
|  | 1370 |  | 
|  | 1371 | if (file->f_op->aio_fsync) | 
|  | 1372 | ret = file->f_op->aio_fsync(iocb, 1); | 
|  | 1373 | return ret; | 
|  | 1374 | } | 
|  | 1375 |  | 
|  | 1376 | static ssize_t aio_fsync(struct kiocb *iocb) | 
|  | 1377 | { | 
|  | 1378 | struct file *file = iocb->ki_filp; | 
|  | 1379 | ssize_t ret = -EINVAL; | 
|  | 1380 |  | 
|  | 1381 | if (file->f_op->aio_fsync) | 
|  | 1382 | ret = file->f_op->aio_fsync(iocb, 0); | 
|  | 1383 | return ret; | 
|  | 1384 | } | 
|  | 1385 |  | 
|  | 1386 | /* | 
|  | 1387 | * aio_setup_iocb: | 
|  | 1388 | *	Performs the initial checks and aio retry method | 
|  | 1389 | *	setup for the kiocb at the time of io submission. | 
|  | 1390 | */ | 
| Adrian Bunk | 25ee7e3 | 2005-04-25 08:18:14 -0700 | [diff] [blame] | 1391 | static ssize_t aio_setup_iocb(struct kiocb *kiocb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | { | 
|  | 1393 | struct file *file = kiocb->ki_filp; | 
|  | 1394 | ssize_t ret = 0; | 
|  | 1395 |  | 
|  | 1396 | switch (kiocb->ki_opcode) { | 
|  | 1397 | case IOCB_CMD_PREAD: | 
|  | 1398 | ret = -EBADF; | 
|  | 1399 | if (unlikely(!(file->f_mode & FMODE_READ))) | 
|  | 1400 | break; | 
|  | 1401 | ret = -EFAULT; | 
|  | 1402 | if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, | 
|  | 1403 | kiocb->ki_left))) | 
|  | 1404 | break; | 
| Kostik Belousov | 8766ce4 | 2005-10-23 12:57:13 -0700 | [diff] [blame] | 1405 | ret = security_file_permission(file, MAY_READ); | 
|  | 1406 | if (unlikely(ret)) | 
|  | 1407 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | ret = -EINVAL; | 
|  | 1409 | if (file->f_op->aio_read) | 
|  | 1410 | kiocb->ki_retry = aio_pread; | 
|  | 1411 | break; | 
|  | 1412 | case IOCB_CMD_PWRITE: | 
|  | 1413 | ret = -EBADF; | 
|  | 1414 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | 
|  | 1415 | break; | 
|  | 1416 | ret = -EFAULT; | 
|  | 1417 | if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, | 
|  | 1418 | kiocb->ki_left))) | 
|  | 1419 | break; | 
| Kostik Belousov | 8766ce4 | 2005-10-23 12:57:13 -0700 | [diff] [blame] | 1420 | ret = security_file_permission(file, MAY_WRITE); | 
|  | 1421 | if (unlikely(ret)) | 
|  | 1422 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | ret = -EINVAL; | 
|  | 1424 | if (file->f_op->aio_write) | 
|  | 1425 | kiocb->ki_retry = aio_pwrite; | 
|  | 1426 | break; | 
|  | 1427 | case IOCB_CMD_FDSYNC: | 
|  | 1428 | ret = -EINVAL; | 
|  | 1429 | if (file->f_op->aio_fsync) | 
|  | 1430 | kiocb->ki_retry = aio_fdsync; | 
|  | 1431 | break; | 
|  | 1432 | case IOCB_CMD_FSYNC: | 
|  | 1433 | ret = -EINVAL; | 
|  | 1434 | if (file->f_op->aio_fsync) | 
|  | 1435 | kiocb->ki_retry = aio_fsync; | 
|  | 1436 | break; | 
|  | 1437 | default: | 
|  | 1438 | dprintk("EINVAL: io_submit: no operation provided\n"); | 
|  | 1439 | ret = -EINVAL; | 
|  | 1440 | } | 
|  | 1441 |  | 
|  | 1442 | if (!kiocb->ki_retry) | 
|  | 1443 | return ret; | 
|  | 1444 |  | 
|  | 1445 | return 0; | 
|  | 1446 | } | 
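aio_setup_iocb() keys off ki_opcode, ki_buf and ki_left, all of which io_submit_one() below copies out of the userspace struct iocb. As a hedged illustration of the caller's side (field names come from <linux/aio_abi.h>; the helper name is made up for this sketch):

	#include <linux/aio_abi.h>
	#include <stdint.h>
	#include <string.h>

	static void prep_pread(struct iocb *cb, int fd, void *buf,
			       size_t len, long long off)
	{
		memset(cb, 0, sizeof(*cb));		/* keeps the reserved fields zero */
		cb->aio_lio_opcode = IOCB_CMD_PREAD;	/* selects aio_pread as ki_retry */
		cb->aio_fildes     = fd;		/* must be open for reading (FMODE_READ) */
		cb->aio_buf        = (uint64_t)(uintptr_t)buf;	/* checked with access_ok() */
		cb->aio_nbytes     = len;		/* becomes ki_nbytes/ki_left */
		cb->aio_offset     = off;		/* becomes ki_pos */
	}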
|  | 1447 |  | 
|  | 1448 | /* | 
|  | 1449 | * aio_wake_function: | 
|  | 1450 | * 	wait queue callback function for aio notification; | 
|  | 1451 | * 	simply triggers a retry of the operation via kick_iocb. | 
|  | 1452 | * | 
|  | 1453 | * 	This callback is specified in the wait queue entry in | 
|  | 1454 | *	a kiocb	(current->io_wait points to this wait queue | 
|  | 1455 | *	entry when an aio operation executes; it is used | 
|  | 1456 | * 	instead of a synchronous wait when an i/o blocking | 
|  | 1457 | *	condition is encountered during aio). | 
|  | 1458 | * | 
|  | 1459 | * Note: | 
|  | 1460 | * This routine is executed with the wait queue lock held. | 
|  | 1461 | * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests | 
|  | 1462 | * the ioctx lock inside the wait queue lock. This is safe | 
|  | 1463 | * because this callback isn't used for wait queues which | 
|  | 1464 | * are nested inside ioctx lock (i.e. ctx->wait) | 
|  | 1465 | */ | 
| Adrian Bunk | 25ee7e3 | 2005-04-25 08:18:14 -0700 | [diff] [blame] | 1466 | static int aio_wake_function(wait_queue_t *wait, unsigned mode, | 
|  | 1467 | int sync, void *key) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | { | 
|  | 1469 | struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait); | 
|  | 1470 |  | 
|  | 1471 | list_del_init(&wait->task_list); | 
|  | 1472 | kick_iocb(iocb); | 
|  | 1473 | return 1; | 
|  | 1474 | } | 
|  | 1475 |  | 
|  | 1476 | int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | 
|  | 1477 | struct iocb *iocb) | 
|  | 1478 | { | 
|  | 1479 | struct kiocb *req; | 
|  | 1480 | struct file *file; | 
|  | 1481 | ssize_t ret; | 
|  | 1482 |  | 
|  | 1483 | /* enforce forwards compatibility on users */ | 
|  | 1484 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 || | 
|  | 1485 | iocb->aio_reserved3)) { | 
|  | 1486 | pr_debug("EINVAL: io_submit: reserve field set\n"); | 
|  | 1487 | return -EINVAL; | 
|  | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | /* prevent overflows */ | 
|  | 1491 | if (unlikely( | 
|  | 1492 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | 
|  | 1493 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | 
|  | 1494 | ((ssize_t)iocb->aio_nbytes < 0) | 
|  | 1495 | )) { | 
|  | 1496 | pr_debug("EINVAL: io_submit: overflow check\n"); | 
|  | 1497 | return -EINVAL; | 
|  | 1498 | } | 
|  | 1499 |  | 
|  | 1500 | file = fget(iocb->aio_fildes); | 
|  | 1501 | if (unlikely(!file)) | 
|  | 1502 | return -EBADF; | 
|  | 1503 |  | 
|  | 1504 | req = aio_get_req(ctx);		/* returns with 2 references to req */ | 
|  | 1505 | if (unlikely(!req)) { | 
|  | 1506 | fput(file); | 
|  | 1507 | return -EAGAIN; | 
|  | 1508 | } | 
|  | 1509 |  | 
|  | 1510 | req->ki_filp = file; | 
| Ken Chen | 212079c | 2005-05-01 08:59:15 -0700 | [diff] [blame] | 1511 | ret = put_user(req->ki_key, &user_iocb->aio_key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | if (unlikely(ret)) { | 
|  | 1513 | dprintk("EFAULT: aio_key\n"); | 
|  | 1514 | goto out_put_req; | 
|  | 1515 | } | 
|  | 1516 |  | 
|  | 1517 | req->ki_obj.user = user_iocb; | 
|  | 1518 | req->ki_user_data = iocb->aio_data; | 
|  | 1519 | req->ki_pos = iocb->aio_offset; | 
|  | 1520 |  | 
|  | 1521 | req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; | 
|  | 1522 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; | 
|  | 1523 | req->ki_opcode = iocb->aio_lio_opcode; | 
|  | 1524 | init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); | 
|  | 1525 | INIT_LIST_HEAD(&req->ki_wait.task_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | req->ki_retried = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 |  | 
|  | 1528 | ret = aio_setup_iocb(req); | 
|  | 1529 |  | 
|  | 1530 | if (ret) | 
|  | 1531 | goto out_put_req; | 
|  | 1532 |  | 
|  | 1533 | spin_lock_irq(&ctx->ctx_lock); | 
| Benjamin LaHaise | ac0b1bc | 2005-09-09 13:02:09 -0700 | [diff] [blame] | 1534 | aio_run_iocb(req); | 
| Benjamin LaHaise | ac0b1bc | 2005-09-09 13:02:09 -0700 | [diff] [blame] | 1535 | if (!list_empty(&ctx->run_list)) { | 
| Ken Chen | 954d3e9 | 2005-05-01 08:59:16 -0700 | [diff] [blame] | 1536 | /* drain the run list */ | 
|  | 1537 | while (__aio_run_iocbs(ctx)) | 
|  | 1538 | ; | 
|  | 1539 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | spin_unlock_irq(&ctx->ctx_lock); | 
|  | 1541 | aio_put_req(req);	/* drop extra ref to req */ | 
|  | 1542 | return 0; | 
|  | 1543 |  | 
|  | 1544 | out_put_req: | 
|  | 1545 | aio_put_req(req);	/* drop extra ref to req */ | 
|  | 1546 | aio_put_req(req);	/* drop i/o ref to req */ | 
|  | 1547 | return ret; | 
|  | 1548 | } | 
|  | 1549 |  | 
|  | 1550 | /* sys_io_submit: | 
|  | 1551 | *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns | 
|  | 1552 | *	the number of iocbs queued.  May return -EINVAL if the aio_context | 
|  | 1553 | *	specified by ctx_id is invalid, if nr is < 0, if the iocb at | 
|  | 1554 | *	*iocbpp[0] is not properly initialized, or if the operation specified | 
|  | 1555 | *	is invalid for the file descriptor in the iocb.  May fail with | 
|  | 1556 | *	-EFAULT if any of the data structures point to invalid data.  May | 
|  | 1557 | *	fail with -EBADF if the file descriptor specified in the first | 
|  | 1558 | *	iocb is invalid.  May fail with -EAGAIN if insufficient resources | 
|  | 1559 | *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will | 
|  | 1560 | *	fail with -ENOSYS if not implemented. | 
|  | 1561 | */ | 
|  | 1562 | asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr, | 
|  | 1563 | struct iocb __user * __user *iocbpp) | 
|  | 1564 | { | 
|  | 1565 | struct kioctx *ctx; | 
|  | 1566 | long ret = 0; | 
|  | 1567 | int i; | 
|  | 1568 |  | 
|  | 1569 | if (unlikely(nr < 0)) | 
|  | 1570 | return -EINVAL; | 
|  | 1571 |  | 
|  | 1572 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) | 
|  | 1573 | return -EFAULT; | 
|  | 1574 |  | 
|  | 1575 | ctx = lookup_ioctx(ctx_id); | 
|  | 1576 | if (unlikely(!ctx)) { | 
|  | 1577 | pr_debug("EINVAL: io_submit: invalid context id\n"); | 
|  | 1578 | return -EINVAL; | 
|  | 1579 | } | 
|  | 1580 |  | 
|  | 1581 | /* | 
|  | 1582 | * AKPM: should this return a partial result if some of the IOs were | 
|  | 1583 | * successfully submitted? | 
|  | 1584 | */ | 
|  | 1585 | for (i=0; i<nr; i++) { | 
|  | 1586 | struct iocb __user *user_iocb; | 
|  | 1587 | struct iocb tmp; | 
|  | 1588 |  | 
|  | 1589 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | 
|  | 1590 | ret = -EFAULT; | 
|  | 1591 | break; | 
|  | 1592 | } | 
|  | 1593 |  | 
|  | 1594 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | 
|  | 1595 | ret = -EFAULT; | 
|  | 1596 | break; | 
|  | 1597 | } | 
|  | 1598 |  | 
|  | 1599 | ret = io_submit_one(ctx, user_iocb, &tmp); | 
|  | 1600 | if (ret) | 
|  | 1601 | break; | 
|  | 1602 | } | 
|  | 1603 |  | 
|  | 1604 | put_ioctx(ctx); | 
|  | 1605 | return i ? i : ret; | 
|  | 1606 | } | 
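A hedged usage sketch of the submission path (again not part of this file; it reuses the hypothetical prep_pread() helper sketched earlier): the return value is the number of iocbs queued, so a caller has to handle partial submission.

	#define _GNU_SOURCE		/* for syscall() */
	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	static long submit_pread(aio_context_t ctx, struct iocb *cb)
	{
		struct iocb *list[1] = { cb };
		long queued = syscall(__NR_io_submit, ctx, 1L, list);

		if (queued < 0)
			perror("io_submit");		/* nothing was queued */
		else if (queued < 1)
			fprintf(stderr, "only %ld of 1 iocbs queued\n", queued);
		return queued;
	}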
|  | 1607 |  | 
|  | 1608 | /* lookup_kiocb | 
|  | 1609 | *	Finds a given iocb for cancellation. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1610 | */ | 
| Adrian Bunk | 25ee7e3 | 2005-04-25 08:18:14 -0700 | [diff] [blame] | 1611 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, | 
|  | 1612 | u32 key) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1613 | { | 
|  | 1614 | struct list_head *pos; | 
| Zach Brown | d00689a | 2005-11-13 16:07:34 -0800 | [diff] [blame] | 1615 |  | 
|  | 1616 | assert_spin_locked(&ctx->ctx_lock); | 
|  | 1617 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | /* TODO: use a hash or array, this sucks. */ | 
|  | 1619 | list_for_each(pos, &ctx->active_reqs) { | 
|  | 1620 | struct kiocb *kiocb = list_kiocb(pos); | 
|  | 1621 | if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key) | 
|  | 1622 | return kiocb; | 
|  | 1623 | } | 
|  | 1624 | return NULL; | 
|  | 1625 | } | 
|  | 1626 |  | 
|  | 1627 | /* sys_io_cancel: | 
|  | 1628 | *	Attempts to cancel an iocb previously passed to io_submit.  If | 
|  | 1629 | *	the operation is successfully cancelled, the resulting event is | 
|  | 1630 | *	copied into the memory pointed to by result without being placed | 
|  | 1631 | *	into the completion queue and 0 is returned.  May fail with | 
|  | 1632 | *	-EFAULT if any of the data structures pointed to are invalid. | 
|  | 1633 | *	May fail with -EINVAL if aio_context specified by ctx_id is | 
|  | 1634 | *	invalid.  May fail with -EAGAIN if the iocb specified was not | 
|  | 1635 | *	cancelled.  Will fail with -ENOSYS if not implemented. | 
|  | 1636 | */ | 
|  | 1637 | asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, | 
|  | 1638 | struct io_event __user *result) | 
|  | 1639 | { | 
|  | 1640 | int (*cancel)(struct kiocb *iocb, struct io_event *res); | 
|  | 1641 | struct kioctx *ctx; | 
|  | 1642 | struct kiocb *kiocb; | 
|  | 1643 | u32 key; | 
|  | 1644 | int ret; | 
|  | 1645 |  | 
|  | 1646 | ret = get_user(key, &iocb->aio_key); | 
|  | 1647 | if (unlikely(ret)) | 
|  | 1648 | return -EFAULT; | 
|  | 1649 |  | 
|  | 1650 | ctx = lookup_ioctx(ctx_id); | 
|  | 1651 | if (unlikely(!ctx)) | 
|  | 1652 | return -EINVAL; | 
|  | 1653 |  | 
|  | 1654 | spin_lock_irq(&ctx->ctx_lock); | 
|  | 1655 | ret = -EAGAIN; | 
|  | 1656 | kiocb = lookup_kiocb(ctx, iocb, key); | 
|  | 1657 | if (kiocb && kiocb->ki_cancel) { | 
|  | 1658 | cancel = kiocb->ki_cancel; | 
|  | 1659 | kiocb->ki_users++; | 
|  | 1660 | kiocbSetCancelled(kiocb); | 
|  | 1661 | } else | 
|  | 1662 | cancel = NULL; | 
|  | 1663 | spin_unlock_irq(&ctx->ctx_lock); | 
|  | 1664 |  | 
|  | 1665 | if (NULL != cancel) { | 
|  | 1666 | struct io_event tmp; | 
|  | 1667 | pr_debug("calling cancel\n"); | 
|  | 1668 | memset(&tmp, 0, sizeof(tmp)); | 
|  | 1669 | tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; | 
|  | 1670 | tmp.data = kiocb->ki_user_data; | 
|  | 1671 | ret = cancel(kiocb, &tmp); | 
|  | 1672 | if (!ret) { | 
|  | 1673 | /* Cancellation succeeded -- copy the result | 
|  | 1674 | * into the user's buffer. | 
|  | 1675 | */ | 
|  | 1676 | if (copy_to_user(result, &tmp, sizeof(tmp))) | 
|  | 1677 | ret = -EFAULT; | 
|  | 1678 | } | 
|  | 1679 | } else | 
| Wendy Cheng | 8f58202 | 2005-09-09 13:02:08 -0700 | [diff] [blame] | 1680 | ret = -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 |  | 
|  | 1682 | put_ioctx(ctx); | 
|  | 1683 |  | 
|  | 1684 | return ret; | 
|  | 1685 | } | 
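On the userspace side, cancellation identifies the request by the same struct iocb pointer that was handed to io_submit(); a hedged sketch under the same assumptions as the earlier examples:

	#define _GNU_SOURCE		/* for syscall() */
	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <errno.h>
	#include <stdio.h>

	static int cancel_one(aio_context_t ctx, struct iocb *cb)
	{
		struct io_event ev;

		/* on success the completion event is returned here instead of
		 * being placed in the completion ring */
		if (syscall(__NR_io_cancel, ctx, cb, &ev) == 0) {
			printf("cancelled, res=%lld\n", (long long)ev.res);
			return 0;
		}
		if (errno == EAGAIN)
			fprintf(stderr, "iocb was not cancelled\n");
		else
			perror("io_cancel");
		return -1;
	}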
|  | 1686 |  | 
|  | 1687 | /* io_getevents: | 
|  | 1688 | *	Attempts to read at least min_nr events and up to nr events from | 
|  | 1689 | *	the completion queue for the aio_context specified by ctx_id.  May | 
|  | 1690 | *	fail with -EINVAL if ctx_id is invalid, if min_nr is out of range, | 
|  | 1691 | *	if nr is out of range, or if timeout is out of range.  May fail | 
|  | 1692 | *	with -EFAULT if any of the memory specified is invalid.  May return | 
|  | 1693 | *	0 or < min_nr if no events are available and the timeout specified | 
|  | 1694 | *	by timeout has elapsed, where timeout == NULL specifies an infinite | 
|  | 1695 | *	timeout.  Note that the timeout pointed to is relative and | 
|  | 1696 | *	will be updated if not NULL and the operation blocks.  Will fail | 
|  | 1697 | *	with -ENOSYS if not implemented. | 
|  | 1698 | */ | 
|  | 1699 | asmlinkage long sys_io_getevents(aio_context_t ctx_id, | 
|  | 1700 | long min_nr, | 
|  | 1701 | long nr, | 
|  | 1702 | struct io_event __user *events, | 
|  | 1703 | struct timespec __user *timeout) | 
|  | 1704 | { | 
|  | 1705 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | 
|  | 1706 | long ret = -EINVAL; | 
|  | 1707 |  | 
|  | 1708 | if (likely(ioctx)) { | 
|  | 1709 | if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0)) | 
|  | 1710 | ret = read_events(ioctx, min_nr, nr, events, timeout); | 
|  | 1711 | put_ioctx(ioctx); | 
|  | 1712 | } | 
|  | 1713 |  | 
|  | 1714 | return ret; | 
|  | 1715 | } | 
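To round out the picture, a hedged sketch of reaping completions from userspace; the relative timeout matches the comment above (a NULL timeout would mean waiting indefinitely, and the kernel may update the timespec if the call blocks):

	#define _GNU_SOURCE		/* for syscall() */
	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <time.h>

	static long reap_events(aio_context_t ctx, struct io_event *events, long nr)
	{
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };	/* wait at most 1s */
		long got = syscall(__NR_io_getevents, ctx, 1L, nr, events, &ts);

		if (got < 0)
			perror("io_getevents");
		else
			printf("got %ld completion events\n", got);
		return got;
	}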
|  | 1716 |  | 
|  | 1717 | __initcall(aio_setup); | 
|  | 1718 |  | 
|  | 1719 | EXPORT_SYMBOL(aio_complete); | 
|  | 1720 | EXPORT_SYMBOL(aio_put_req); | 
|  | 1721 | EXPORT_SYMBOL(wait_on_sync_kiocb); |