/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

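/*
 * State for io_submit() batching: each entry remembers an address_space
 * that had requests queued during a batch, hashed on a small on-stack
 * table (see the io_submit() path later in this file).
 */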
#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
struct aio_batch_entry {
        struct hlist_node list;
        struct address_space *mapping;
};
mempool_t *abe_pool;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

        aio_wq = create_workqueue("aio");
        abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
        BUG_ON(!abe_pool);

        pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

        return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
        struct aio_ring_info *info = &ctx->ring_info;
        long i;

        for (i=0; i<info->nr_pages; i++)
                put_page(info->ring_pages[i]);

        if (info->mmap_size) {
                down_write(&ctx->mm->mmap_sem);
                do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
                up_write(&ctx->mm->mmap_sem);
        }

        if (info->ring_pages && info->ring_pages != info->internal_pages)
                kfree(info->ring_pages);
        info->ring_pages = NULL;
        info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
        struct aio_ring *ring;
        struct aio_ring_info *info = &ctx->ring_info;
        unsigned nr_events = ctx->max_reqs;
        unsigned long size;
        int nr_pages;

        /* Compensate for the ring buffer's head/tail overlap entry */
        nr_events += 2;	/* 1 is required, 2 for good luck */

        size = sizeof(struct aio_ring);
        size += sizeof(struct io_event) * nr_events;
        nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

        if (nr_pages < 0)
                return -EINVAL;

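        /* Recompute nr_events to use all of the space in the pages we are
         * about to allocate and map. */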
        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

        info->nr = 0;
        info->ring_pages = info->internal_pages;
        if (nr_pages > AIO_RING_PAGES) {
                info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
                if (!info->ring_pages)
                        return -ENOMEM;
        }

        info->mmap_size = nr_pages * PAGE_SIZE;
        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
        down_write(&ctx->mm->mmap_sem);
        info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
                                  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
                                  0);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&ctx->mm->mmap_sem);
                info->mmap_size = 0;
                aio_free_ring(ctx);
                return -EAGAIN;
        }

        dprintk("mmap address: 0x%08lx\n", info->mmap_base);
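        /* Pin the ring pages so completion events can be written to them
         * via kmap_atomic(), including from interrupt context in
         * aio_complete(). */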
        info->nr_pages = get_user_pages(current, ctx->mm,
                                        info->mmap_base, nr_pages,
                                        1, 0, info->ring_pages, NULL);
        up_write(&ctx->mm->mmap_sem);

        if (unlikely(info->nr_pages != nr_pages)) {
                aio_free_ring(ctx);
                return -EAGAIN;
        }

        ctx->user_id = info->mmap_base;

        info->nr = nr_events;		/* trusted copy */

        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
        ring->nr = nr_events;	/* user copy */
        ring->id = ctx->user_id;
        ring->head = ring->tail = 0;
        ring->magic = AIO_RING_MAGIC;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
        kunmap_atomic(ring, KM_USER0);

        return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
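/*
 * Ring layout: page 0 holds the struct aio_ring header followed by
 * AIO_EVENTS_FIRST_PAGE events; every later page holds
 * AIO_EVENTS_PER_PAGE events.  AIO_EVENTS_OFFSET biases an event index
 * so that dividing by AIO_EVENTS_PER_PAGE picks the right page and the
 * remainder skips the header on page 0.
 */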

#define aio_ring_event(info, nr, km) ({					\
        unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
        struct io_event *__event;					\
        __event = kmap_atomic(						\
                        (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
        __event += pos % AIO_EVENTS_PER_PAGE;				\
        __event;							\
})

#define put_aio_ring_event(event, km) do {	\
        struct io_event *__event = (event);	\
        (void)__event;				\
        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
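/*
 * Typical usage, as in aio_complete() and aio_read_evt() below: map the
 * slot, fill it in (or copy it out), then drop the mapping:
 *
 *	struct io_event *ev = aio_ring_event(info, idx, KM_USER0);
 *	*ev = ...;
 *	put_aio_ring_event(ev, KM_USER0);
 */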

static void ctx_rcu_free(struct rcu_head *head)
{
        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
        unsigned nr_events = ctx->max_reqs;

        kmem_cache_free(kioctx_cachep, ctx);

        if (nr_events) {
                spin_lock(&aio_nr_lock);
                BUG_ON(aio_nr - nr_events > aio_nr);
                aio_nr -= nr_events;
                spin_unlock(&aio_nr_lock);
        }
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
        BUG_ON(ctx->reqs_active);

        cancel_delayed_work(&ctx->wq);
        cancel_work_sync(&ctx->wq.work);
        aio_free_ring(ctx);
        mmdrop(ctx->mm);
        ctx->mm = NULL;
        pr_debug("__put_ioctx: freeing %p\n", ctx);
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

#define get_ioctx(kioctx) do {						\
        BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
        atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
        BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
        if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
                __put_ioctx(kioctx);					\
} while (0)

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
        struct mm_struct *mm;
        struct kioctx *ctx;
        int did_sync = 0;

        /* Prevent overflows */
        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
            (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
                pr_debug("ENOMEM: nr_events too high\n");
                return ERR_PTR(-EINVAL);
        }

        if ((unsigned long)nr_events > aio_max_nr)
                return ERR_PTR(-EAGAIN);

        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->max_reqs = nr_events;
        mm = ctx->mm = current->mm;
        atomic_inc(&mm->mm_count);

        atomic_set(&ctx->users, 1);
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->ring_info.ring_lock);
        init_waitqueue_head(&ctx->wait);

        INIT_LIST_HEAD(&ctx->active_reqs);
        INIT_LIST_HEAD(&ctx->run_list);
        INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

        if (aio_setup_ring(ctx) < 0)
                goto out_freectx;

        /* limit the number of system wide aios */
        do {
                spin_lock_bh(&aio_nr_lock);
                if (aio_nr + nr_events > aio_max_nr ||
                    aio_nr + nr_events < aio_nr)
                        ctx->max_reqs = 0;
                else
                        aio_nr += ctx->max_reqs;
                spin_unlock_bh(&aio_nr_lock);
                if (ctx->max_reqs || did_sync)
                        break;

                /* wait for rcu callbacks to have completed before giving up */
                synchronize_rcu();
                did_sync = 1;
                ctx->max_reqs = nr_events;
        } while (1);

        if (ctx->max_reqs == 0)
                goto out_cleanup;

        /* now link into global list. */
        spin_lock(&mm->ioctx_lock);
        hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
        spin_unlock(&mm->ioctx_lock);

        dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
                ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
        return ctx;

out_cleanup:
        __put_ioctx(ctx);
        return ERR_PTR(-EAGAIN);

out_freectx:
        mmdrop(mm);
        kmem_cache_free(kioctx_cachep, ctx);
        ctx = ERR_PTR(-ENOMEM);

        dprintk("aio: error allocating ioctx %p\n", ctx);
        return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
        int (*cancel)(struct kiocb *, struct io_event *);
        struct io_event res;
        spin_lock_irq(&ctx->ctx_lock);
        ctx->dead = 1;
        while (!list_empty(&ctx->active_reqs)) {
                struct list_head *pos = ctx->active_reqs.next;
                struct kiocb *iocb = list_kiocb(pos);
                list_del_init(&iocb->ki_list);
                cancel = iocb->ki_cancel;
                kiocbSetCancelled(iocb);
                if (cancel) {
                        iocb->ki_users++;
                        spin_unlock_irq(&ctx->ctx_lock);
                        cancel(iocb, &res);
                        spin_lock_irq(&ctx->ctx_lock);
                }
        }
        spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        spin_lock_irq(&ctx->ctx_lock);
        if (!ctx->reqs_active)
                goto out;

        add_wait_queue(&ctx->wait, &wait);
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (ctx->reqs_active) {
                spin_unlock_irq(&ctx->ctx_lock);
                io_schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                spin_lock_irq(&ctx->ctx_lock);
        }
        __set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(&ctx->wait, &wait);

out:
        spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
        while (iocb->ki_users) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!iocb->ki_users)
                        break;
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);
        return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
        struct kioctx *ctx;

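        /*
         * No locking is needed here: the last user of the mm is going
         * away, so nothing else can add to or walk the ioctx list.
         */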
        while (!hlist_empty(&mm->ioctx_list)) {
                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
                hlist_del_rcu(&ctx->list);

                aio_cancel_all(ctx);

                wait_for_all_aios(ctx);
                /*
                 * Ensure we don't leave the ctx on the aio_wq
                 */
                cancel_work_sync(&ctx->wq.work);

                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
                                "exit_aio:ioctx still alive: %d %d %d\n",
                                atomic_read(&ctx->users), ctx->dead,
                                ctx->reqs_active);
                put_ioctx(ctx);
        }
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
        struct kiocb *req = NULL;
        struct aio_ring *ring;
        int okay = 0;

        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
        if (unlikely(!req))
                return NULL;

        req->ki_flags = 0;
        req->ki_users = 2;
        req->ki_key = 0;
        req->ki_ctx = ctx;
        req->ki_cancel = NULL;
        req->ki_retry = NULL;
        req->ki_dtor = NULL;
        req->private = NULL;
        req->ki_iovec = NULL;
        INIT_LIST_HEAD(&req->ki_run_list);
        req->ki_eventfd = NULL;

        /* Check if the completion queue has enough free space to
         * accept an event from this io.
         */
        spin_lock_irq(&ctx->ctx_lock);
        ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
        if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
                list_add(&req->ki_list, &ctx->active_reqs);
                ctx->reqs_active++;
                okay = 1;
        }
        kunmap_atomic(ring, KM_USER0);
        spin_unlock_irq(&ctx->ctx_lock);

        if (!okay) {
                kmem_cache_free(kiocb_cachep, req);
                req = NULL;
        }

        return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
        struct kiocb *req;
        /* Handle a potential starvation case -- should be exceedingly rare as
         * requests will be stuck on fput_head only if the aio_fput_routine is
         * delayed and the requests were the last user of the struct file.
         */
        req = __aio_get_req(ctx);
        if (unlikely(NULL == req)) {
                aio_fput_routine(NULL);
                req = __aio_get_req(ctx);
        }
        return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
        assert_spin_locked(&ctx->ctx_lock);

        if (req->ki_eventfd != NULL)
                eventfd_ctx_put(req->ki_eventfd);
        if (req->ki_dtor)
                req->ki_dtor(req);
        if (req->ki_iovec != &req->ki_inline_vec)
                kfree(req->ki_iovec);
        kmem_cache_free(kiocb_cachep, req);
        ctx->reqs_active--;

        if (unlikely(!ctx->reqs_active && ctx->dead))
                wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
        spin_lock_irq(&fput_lock);
        while (likely(!list_empty(&fput_head))) {
                struct kiocb *req = list_kiocb(fput_head.next);
                struct kioctx *ctx = req->ki_ctx;

                list_del(&req->ki_list);
                spin_unlock_irq(&fput_lock);

                /* Complete the fput(s) */
                if (req->ki_filp != NULL)
                        fput(req->ki_filp);

                /* Link the iocb into the context's free list */
                spin_lock_irq(&ctx->ctx_lock);
                really_put_req(ctx, req);
                spin_unlock_irq(&ctx->ctx_lock);

                put_ioctx(ctx);
                spin_lock_irq(&fput_lock);
        }
        spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
        dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
                req, atomic_long_read(&req->ki_filp->f_count));

        assert_spin_locked(&ctx->ctx_lock);

        req->ki_users--;
        BUG_ON(req->ki_users < 0);
        if (likely(req->ki_users))
                return 0;
        list_del(&req->ki_list);		/* remove from active_reqs */
        req->ki_cancel = NULL;
        req->ki_retry = NULL;

        /*
         * Try to optimize the aio and eventfd file* puts, by avoiding to
         * schedule work in case it is not final fput() time. In normal cases,
         * we would not be holding the last reference to the file*, so
         * this function will be executed w/out any aio kthread wakeup.
         */
        if (unlikely(!fput_atomic(req->ki_filp))) {
                get_ioctx(ctx);
                spin_lock(&fput_lock);
                list_add(&req->ki_list, &fput_head);
                spin_unlock(&fput_lock);
                queue_work(aio_wq, &fput_work);
        } else {
                req->ki_filp = NULL;
                really_put_req(ctx, req);
        }
        return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
        struct kioctx *ctx = req->ki_ctx;
        int ret;
        spin_lock_irq(&ctx->ctx_lock);
        ret = __aio_put_req(ctx, req);
        spin_unlock_irq(&ctx->ctx_lock);
        return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
        struct mm_struct *mm = current->mm;
        struct kioctx *ctx, *ret = NULL;
        struct hlist_node *n;

        rcu_read_lock();

        hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
                if (ctx->user_id == ctx_id && !ctx->dead) {
                        get_ioctx(ctx);
                        ret = ctx;
                        break;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
        struct kioctx *ctx = iocb->ki_ctx;

        assert_spin_locked(&ctx->ctx_lock);

        if (list_empty(&iocb->ki_run_list)) {
                list_add_tail(&iocb->ki_run_list,
                        &ctx->run_list);
                return 1;
        }
        return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
        struct kioctx	*ctx = iocb->ki_ctx;
        ssize_t (*retry)(struct kiocb *);
        ssize_t ret;

        if (!(retry = iocb->ki_retry)) {
                printk("aio_run_iocb: iocb->ki_retry = NULL\n");
                return 0;
        }

        /*
         * We don't want the next retry iteration for this
         * operation to start until this one has returned and
         * updated the iocb state. However, wait_queue functions
         * can trigger a kick_iocb from interrupt context in the
         * meantime, indicating that data is available for the next
         * iteration. We want to remember that and enable the
         * next retry iteration _after_ we are through with
         * this one.
         *
         * So, in order to be able to register a "kick", but
         * prevent it from being queued now, we clear the kick
         * flag, but make the kick code *think* that the iocb is
         * still on the run list until we are actually done.
         * When we are done with this iteration, we check if
         * the iocb was kicked in the meantime and if so, queue
         * it up afresh.
         */

        kiocbClearKicked(iocb);

        /*
         * This is so that aio_complete knows it doesn't need to
         * pull the iocb off the run list (We can't just call
         * INIT_LIST_HEAD because we don't want a kick_iocb to
         * queue this on the run list yet)
         */
        iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
        spin_unlock_irq(&ctx->ctx_lock);

        /* Quit retrying if the i/o has been cancelled */
        if (kiocbIsCancelled(iocb)) {
                ret = -EINTR;
                aio_complete(iocb, ret, 0);
                /* must not access the iocb after this */
                goto out;
        }

        /*
         * Now we are all set to call the retry method in async
         * context.
         */
        ret = retry(iocb);

        if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
                /*
                 * There's no easy way to restart the syscall since other AIO's
                 * may be already running. Just fail this IO with EINTR.
                 */
                if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
                             ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
                        ret = -EINTR;
                aio_complete(iocb, ret, 0);
        }
out:
        spin_lock_irq(&ctx->ctx_lock);

        if (-EIOCBRETRY == ret) {
                /*
                 * OK, now that we are done with this iteration
                 * and know that there is more left to go,
                 * this is where we let go so that a subsequent
                 * "kick" can start the next iteration
                 */

                /* will make __queue_kicked_iocb succeed from here on */
                INIT_LIST_HEAD(&iocb->ki_run_list);
                /* we must queue the next iteration ourselves, if it
                 * has already been kicked */
                if (kiocbIsKicked(iocb)) {
                        __queue_kicked_iocb(iocb);

                        /*
                         * __queue_kicked_iocb will always return 1 here, because
                         * iocb->ki_run_list is empty at this point so it should
                         * be safe to unconditionally queue the context into the
                         * work queue.
                         */
                        aio_queue_work(ctx);
                }
        }
        return ret;
}

/*
 * __aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
        struct kiocb *iocb;
        struct list_head run_list;

        assert_spin_locked(&ctx->ctx_lock);

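        /*
         * Splice the run list onto our local copy: aio_run_iocb() drops
         * ctx_lock, so new kicks may requeue onto ctx->run_list while we
         * work through this batch (re-checked before returning below).
         */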
        list_replace_init(&ctx->run_list, &run_list);
        while (!list_empty(&run_list)) {
                iocb = list_entry(run_list.next, struct kiocb,
                                  ki_run_list);
                list_del(&iocb->ki_run_list);
                /*
                 * Hold an extra reference while retrying i/o.
                 */
                iocb->ki_users++;       /* grab extra reference */
                aio_run_iocb(iocb);
                __aio_put_req(ctx, iocb);
        }
        if (!list_empty(&ctx->run_list))
                return 1;
        return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
        unsigned long timeout;
        /*
         * if someone is waiting, get the work started right
         * away, otherwise, use a longer delay
         */
        smp_mb();
        if (waitqueue_active(&ctx->wait))
                timeout = 1;
        else
                timeout = HZ/10;
        queue_delayed_work(aio_wq, &ctx->wq, timeout);
}


/*
 * aio_run_iocbs:
 * 	Process all pending retries queued on the ioctx
 * 	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
        int requeue;

        spin_lock_irq(&ctx->ctx_lock);

        requeue = __aio_run_iocbs(ctx);
        spin_unlock_irq(&ctx->ctx_lock);
        if (requeue)
                aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
        spin_lock_irq(&ctx->ctx_lock);
        while (__aio_run_iocbs(ctx))
                ;
        spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 * 	Work queue handler triggered to process pending
 * 	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
        struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
        mm_segment_t oldfs = get_fs();
        struct mm_struct *mm;
        int requeue;

        set_fs(USER_DS);
        use_mm(ctx->mm);
        spin_lock_irq(&ctx->ctx_lock);
        requeue = __aio_run_iocbs(ctx);
        mm = ctx->mm;
        spin_unlock_irq(&ctx->ctx_lock);
        unuse_mm(mm);
        set_fs(oldfs);
        /*
         * we're in a worker thread already, don't use queue_delayed_work,
         */
        if (requeue)
                queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
        struct kioctx	*ctx = iocb->ki_ctx;
        unsigned long flags;
        int run = 0;

        spin_lock_irqsave(&ctx->ctx_lock, flags);
        /* set this inside the lock so that we can't race with aio_run_iocb()
         * testing it and putting the iocb on the run list under the lock */
        if (!kiocbTryKick(iocb))
                run = __queue_kicked_iocb(iocb);
        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
        if (run)
                aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *      Called typically from a wait queue callback context
 *      to trigger a retry of the iocb.
 *      The retry is usually executed by aio workqueue
 *      threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
        /* sync iocbs are easy: they can only ever be executing from a
         * single context. */
        if (is_sync_kiocb(iocb)) {
                kiocbSetKicked(iocb);
                wake_up_process(iocb->ki_obj.tsk);
                return;
        }

        try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
        struct kioctx	*ctx = iocb->ki_ctx;
        struct aio_ring_info	*info;
        struct aio_ring	*ring;
        struct io_event	*event;
        unsigned long	flags;
        unsigned long	tail;
        int		ret;

        /*
         * Special case handling for sync iocbs:
         *  - events go directly into the iocb for fast handling
         *  - the sync task with the iocb in its stack holds the single iocb
         *    ref, no other paths have a way to get another ref
         *  - the sync task helpfully left a reference to itself in the iocb
         */
        if (is_sync_kiocb(iocb)) {
                BUG_ON(iocb->ki_users != 1);
                iocb->ki_user_data = res;
                iocb->ki_users = 0;
                wake_up_process(iocb->ki_obj.tsk);
                return 1;
        }

        info = &ctx->ring_info;

        /* add a completion event to the ring buffer.
         * must be done holding ctx->ctx_lock to prevent
         * other code from messing with the tail
         * pointer since we might be called from irq
         * context.
         */
        spin_lock_irqsave(&ctx->ctx_lock, flags);

        if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
                list_del_init(&iocb->ki_run_list);

        /*
         * cancelled requests don't get events, userland was given one
         * when the event got cancelled.
         */
        if (kiocbIsCancelled(iocb))
                goto put_rq;

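        /*
         * Map the ring header page; the event slot itself is mapped
         * separately below via aio_ring_event().
         */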
        ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

        tail = info->tail;
        event = aio_ring_event(info, tail, KM_IRQ0);
        if (++tail >= info->nr)
                tail = 0;

        event->obj = (u64)(unsigned long)iocb->ki_obj.user;
        event->data = iocb->ki_user_data;
        event->res = res;
        event->res2 = res2;

        dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
                ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
                res, res2);

        /* after flagging the request as done, we
         * must never even look at it again
         */
        smp_wmb();	/* make event visible before updating tail */

        info->tail = tail;
        ring->tail = tail;

        put_aio_ring_event(event, KM_IRQ0);
        kunmap_atomic(ring, KM_IRQ1);

        pr_debug("added to ring %p at [%lu]\n", iocb, tail);

        /*
         * Check if the user asked us to deliver the result through an
         * eventfd. The eventfd_signal() function is safe to be called
         * from IRQ context.
         */
        if (iocb->ki_eventfd != NULL)
                eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
        /* everything turned out well, dispose of the aiocb. */
        ret = __aio_put_req(ctx, iocb);

        /*
         * We have to order our ring_info tail store above and test
         * of the wait list below outside the wait lock.  This is
         * like in wake_up_bit() where clearing a bit has to be
         * ordered with the unlocked test.
         */
        smp_mb();

        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);

        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
        return ret;
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
        struct aio_ring_info *info = &ioctx->ring_info;
        struct aio_ring *ring;
        unsigned long head;
        int ret = 0;

        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
                 (unsigned long)ring->head, (unsigned long)ring->tail,
                 (unsigned long)ring->nr);

        if (ring->head == ring->tail)
                goto out;

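        /*
         * Re-check under ring_lock before consuming an event; the test
         * above was an unlocked fast path for an empty ring.
         */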
        spin_lock(&info->ring_lock);

        head = ring->head % info->nr;
        if (head != ring->tail) {
                struct io_event *evp = aio_ring_event(info, head, KM_USER1);
                *ent = *evp;
                head = (head + 1) % info->nr;
                smp_mb(); /* finish reading the event before updating the head */
                ring->head = head;
                ret = 1;
                put_aio_ring_event(evp, KM_USER1);
        }
        spin_unlock(&info->ring_lock);

out:
        kunmap_atomic(ring, KM_USER0);
        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
                 (unsigned long)ring->head, (unsigned long)ring->tail);
        return ret;
}

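/*
 * On-stack timeout helper for read_events(): the timer callback marks
 * the wait as timed out and wakes the sleeping task.
 */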
struct aio_timeout {
        struct timer_list	timer;
        int			timed_out;
        struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
        struct aio_timeout *to = (struct aio_timeout *)data;

        to->timed_out = 1;
        wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
        setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
        to->timed_out = 0;
        to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
                               const struct timespec *ts)
{
        to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
        if (time_after(to->timer.expires, jiffies))
                add_timer(&to->timer);
        else
                to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
        del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
                        long min_nr, long nr,
                        struct io_event __user *event,
                        struct timespec __user *timeout)
{
        long			start_jiffies = jiffies;
        struct task_struct	*tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        int			ret;
        int			i = 0;
        struct io_event		ent;
        struct aio_timeout	to;
        int			retry = 0;

|  | 1106 | /* needed to zero any padding within an entry (there shouldn't be | 
|  | 1107 | * any, but C is fun!) | 
|  | 1108 | */ | 
|  | 1109 | memset(&ent, 0, sizeof(ent)); | 
|  | 1110 | retry: | 
|  | 1111 | ret = 0; | 
|  | 1112 | while (likely(i < nr)) { | 
|  | 1113 | ret = aio_read_evt(ctx, &ent); | 
|  | 1114 | if (unlikely(ret <= 0)) | 
|  | 1115 | break; | 
|  | 1116 |  | 
|  | 1117 | dprintk("read event: %Lx %Lx %Lx %Lx\n", | 
|  | 1118 | ent.data, ent.obj, ent.res, ent.res2); | 
|  | 1119 |  | 
|  | 1120 | /* Could we split the check in two? */ | 
|  | 1121 | ret = -EFAULT; | 
|  | 1122 | if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) { | 
|  | 1123 | dprintk("aio: lost an event due to EFAULT.\n"); | 
|  | 1124 | break; | 
|  | 1125 | } | 
|  | 1126 | ret = 0; | 
|  | 1127 |  | 
|  | 1128 | /* Good, event copied to userland, update counts. */ | 
|  | 1129 | event++; | 
|  | 1130 | i++; | 
|  | 1131 | } | 
|  | 1132 |  | 
|  | 1133 | if (min_nr <= i) | 
|  | 1134 | return i; | 
|  | 1135 | if (ret) | 
|  | 1136 | return ret; | 
|  | 1137 |  | 
|  | 1138 | /* End fast path */ | 
|  | 1139 |  | 
|  | 1140 | /* racy check, but it gets redone */ | 
|  | 1141 | if (!retry && unlikely(!list_empty(&ctx->run_list))) { | 
|  | 1142 | retry = 1; | 
|  | 1143 | aio_run_all_iocbs(ctx); | 
|  | 1144 | goto retry; | 
|  | 1145 | } | 
|  | 1146 |  | 
|  | 1147 | init_timeout(&to); | 
|  | 1148 | if (timeout) { | 
|  | 1149 | struct timespec	ts; | 
|  | 1150 | ret = -EFAULT; | 
|  | 1151 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) | 
|  | 1152 | goto out; | 
|  | 1153 |  | 
|  | 1154 | set_timeout(start_jiffies, &to, &ts); | 
|  | 1155 | } | 
|  | 1156 |  | 
|  | 1157 | while (likely(i < nr)) { | 
|  | 1158 | add_wait_queue_exclusive(&ctx->wait, &wait); | 
|  | 1159 | do { | 
|  | 1160 | set_task_state(tsk, TASK_INTERRUPTIBLE); | 
|  | 1161 | ret = aio_read_evt(ctx, &ent); | 
|  | 1162 | if (ret) | 
|  | 1163 | break; | 
|  | 1164 | if (min_nr <= i) | 
|  | 1165 | break; | 
| Jeff Moyer | e92adcb | 2008-04-28 02:12:04 -0700 | [diff] [blame] | 1166 | if (unlikely(ctx->dead)) { | 
|  | 1167 | ret = -EINVAL; | 
|  | 1168 | break; | 
|  | 1169 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1170 | if (to.timed_out)	/* Only check after read evt */ | 
|  | 1171 | break; | 
| Jeff Moyer | e00ba3d | 2007-12-04 23:45:02 -0800 | [diff] [blame] | 1172 | /* Try to only show up in io wait if there are ops | 
|  | 1173 | *  in flight */ | 
|  | 1174 | if (ctx->reqs_active) | 
|  | 1175 | io_schedule(); | 
|  | 1176 | else | 
|  | 1177 | schedule(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | if (signal_pending(tsk)) { | 
|  | 1179 | ret = -EINTR; | 
|  | 1180 | break; | 
|  | 1181 | } | 
|  | 1182 | /*ret = aio_read_evt(ctx, &ent);*/ | 
|  | 1183 | } while (1) ; | 
|  | 1184 |  | 
|  | 1185 | set_task_state(tsk, TASK_RUNNING); | 
|  | 1186 | remove_wait_queue(&ctx->wait, &wait); | 
|  | 1187 |  | 
|  | 1188 | if (unlikely(ret <= 0)) | 
|  | 1189 | break; | 
|  | 1190 |  | 
|  | 1191 | ret = -EFAULT; | 
|  | 1192 | if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) { | 
|  | 1193 | dprintk("aio: lost an event due to EFAULT.\n"); | 
|  | 1194 | break; | 
|  | 1195 | } | 
|  | 1196 |  | 
|  | 1197 | /* Good, event copied to userland, update counts. */ | 
|  | 1198 | event++; | 
|  | 1199 | i++; | 
|  | 1200 | } | 
|  | 1201 |  | 
|  | 1202 | if (timeout) | 
|  | 1203 | clear_timeout(&to); | 
|  | 1204 | out: | 
| Thomas Gleixner | c6f3a97 | 2008-04-30 00:55:03 -0700 | [diff] [blame] | 1205 | destroy_timer_on_stack(&to.timer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | return i ? i : ret; | 
|  | 1207 | } | 
|  | 1208 |  | 
|  | 1209 | /* Take an ioctx and remove it from the list of ioctx's.  Protects | 
|  | 1210 | * against races with itself via ->dead. | 
|  | 1211 | */ | 
|  | 1212 | static void io_destroy(struct kioctx *ioctx) | 
|  | 1213 | { | 
|  | 1214 | struct mm_struct *mm = current->mm; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | int was_dead; | 
|  | 1216 |  | 
|  | 1217 | /* delete the entry from the list if someone else hasn't already */ | 
| Jens Axboe | abf137d | 2008-12-09 08:11:22 +0100 | [diff] [blame] | 1218 | spin_lock(&mm->ioctx_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1219 | was_dead = ioctx->dead; | 
|  | 1220 | ioctx->dead = 1; | 
| Jens Axboe | abf137d | 2008-12-09 08:11:22 +0100 | [diff] [blame] | 1221 | hlist_del_rcu(&ioctx->list); | 
|  | 1222 | spin_unlock(&mm->ioctx_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 |  | 
|  | 1224 | dprintk("aio_release(%p)\n", ioctx); | 
|  | 1225 | if (likely(!was_dead)) | 
|  | 1226 | put_ioctx(ioctx);	/* twice for the list */ | 
|  | 1227 |  | 
|  | 1228 | aio_cancel_all(ioctx); | 
|  | 1229 | wait_for_all_aios(ioctx); | 
| Jeff Moyer | e92adcb | 2008-04-28 02:12:04 -0700 | [diff] [blame] | 1230 |  | 
|  | 1231 | /* | 
|  | 1232 | * Wake up any waiters.  The setting of ctx->dead must be seen | 
|  | 1233 | * by other CPUs at this point.  Right now, we rely on the | 
|  | 1234 | * locking done by the above calls to ensure this consistency. | 
|  | 1235 | */ | 
|  | 1236 | wake_up(&ioctx->wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | put_ioctx(ioctx);	/* once for the lookup */ | 
|  | 1238 | } | 
|  | 1239 |  | 
|  | 1240 | /* sys_io_setup: | 
|  | 1241 | *	Create an aio_context capable of receiving at least nr_events. | 
|  | 1242 | *	ctxp must not point to an aio_context that already exists, and | 
|  | 1243 | *	must be initialized to 0 prior to the call.  On successful | 
|  | 1244 | *	creation of the aio_context, *ctxp is filled in with the resulting | 
|  | 1245 | *	handle.  May fail with -EINVAL if *ctxp is not initialized, | 
|  | 1246 | *	or if the specified nr_events exceeds internal limits.  May fail | 
|  | 1247 | *	with -EAGAIN if the specified nr_events exceeds the user's limit | 
|  | 1248 | *	of available events.  May fail with -ENOMEM if insufficient kernel | 
|  | 1249 | *	resources are available.  May fail with -EFAULT if an invalid | 
|  | 1250 | *	pointer is passed for ctxp.  Will fail with -ENOSYS if not | 
|  | 1251 | *	implemented. | 
|  | 1252 | */ | 
| Heiko Carstens | 002c897 | 2009-01-14 14:14:18 +0100 | [diff] [blame] | 1253 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 | { | 
|  | 1255 | struct kioctx *ioctx = NULL; | 
|  | 1256 | unsigned long ctx; | 
|  | 1257 | long ret; | 
|  | 1258 |  | 
|  | 1259 | ret = get_user(ctx, ctxp); | 
|  | 1260 | if (unlikely(ret)) | 
|  | 1261 | goto out; | 
|  | 1262 |  | 
|  | 1263 | ret = -EINVAL; | 
| Zach Brown | d55b5fd | 2005-11-07 00:59:31 -0800 | [diff] [blame] | 1264 | if (unlikely(ctx || nr_events == 0)) { | 
|  | 1265 | pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", | 
|  | 1266 | ctx, nr_events); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | goto out; | 
|  | 1268 | } | 
|  | 1269 |  | 
|  | 1270 | ioctx = ioctx_alloc(nr_events); | 
|  | 1271 | ret = PTR_ERR(ioctx); | 
|  | 1272 | if (!IS_ERR(ioctx)) { | 
|  | 1273 | ret = put_user(ioctx->user_id, ctxp); | 
|  | 1274 | if (!ret) | 
|  | 1275 | return 0; | 
|  | 1276 |  | 
|  | 1277 | get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */ | 
|  | 1278 | io_destroy(ioctx); | 
|  | 1279 | } | 
|  | 1280 |  | 
|  | 1281 | out: | 
|  | 1282 | return ret; | 
|  | 1283 | } | 
|  | 1284 |  | 
|  | 1285 | /* sys_io_destroy: | 
|  | 1286 | *	Destroy the aio_context specified.  May cancel any outstanding | 
|  | 1287 | *	AIOs and block on completion.  Will fail with -ENOSYS if not | 
| Satoru Takeuchi | 642b512 | 2010-08-05 11:23:11 -0700 | [diff] [blame] | 1288 | *	implemented.  May fail with -EINVAL if the context pointed to | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | *	is invalid. | 
|  | 1290 | */ | 
| Heiko Carstens | 002c897 | 2009-01-14 14:14:18 +0100 | [diff] [blame] | 1291 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | { | 
|  | 1293 | struct kioctx *ioctx = lookup_ioctx(ctx); | 
|  | 1294 | if (likely(NULL != ioctx)) { | 
|  | 1295 | io_destroy(ioctx); | 
|  | 1296 | return 0; | 
|  | 1297 | } | 
|  | 1298 | pr_debug("EINVAL: io_destroy: invalid context id\n"); | 
|  | 1299 | return -EINVAL; | 
|  | 1300 | } | 
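|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal user-space sketch (not part of this file) exercising the | 
|  |  |  * raw io_setup()/io_destroy() syscalls documented above.  It assumes a | 
|  |  |  * Linux system that provides <linux/aio_abi.h>; error handling is kept | 
|  |  |  * to the bare minimum. | 
|  |  |  */ | 
|  |  | #include <linux/aio_abi.h> | 
|  |  | #include <sys/syscall.h> | 
|  |  | #include <unistd.h> | 
|  |  | #include <stdio.h> | 
|  |  |  | 
|  |  | int main(void) | 
|  |  | { | 
|  |  | 	aio_context_t ctx = 0;	/* must be zero before calling io_setup() */ | 
|  |  |  | 
|  |  | 	if (syscall(__NR_io_setup, 128, &ctx) < 0) { | 
|  |  | 		perror("io_setup"); | 
|  |  | 		return 1; | 
|  |  | 	} | 
|  |  | 	printf("aio context: %#llx\n", (unsigned long long)ctx); | 
|  |  |  | 
|  |  | 	if (syscall(__NR_io_destroy, ctx) < 0) { | 
|  |  | 		perror("io_destroy"); | 
|  |  | 		return 1; | 
|  |  | 	} | 
|  |  | 	return 0; | 
|  |  | } | 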
|  | 1301 |  | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1302 | static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret) | 
|  | 1303 | { | 
|  | 1304 | struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg]; | 
|  | 1305 |  | 
|  | 1306 | BUG_ON(ret <= 0); | 
|  | 1307 |  | 
|  | 1308 | while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) { | 
|  | 1309 | ssize_t this = min((ssize_t)iov->iov_len, ret); | 
|  | 1310 | iov->iov_base += this; | 
|  | 1311 | iov->iov_len -= this; | 
|  | 1312 | iocb->ki_left -= this; | 
|  | 1313 | ret -= this; | 
|  | 1314 | if (iov->iov_len == 0) { | 
|  | 1315 | iocb->ki_cur_seg++; | 
|  | 1316 | iov++; | 
|  | 1317 | } | 
|  | 1318 | } | 
|  | 1319 |  | 
|  | 1320 | /* the caller should not have done more io than what fit in | 
|  | 1321 | * the remaining iovecs */ | 
|  | 1322 | BUG_ON(ret > 0 && iocb->ki_left == 0); | 
|  | 1323 | } | 
|  | 1324 |  | 
|  | 1325 | static ssize_t aio_rw_vect_retry(struct kiocb *iocb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | { | 
|  | 1327 | struct file *file = iocb->ki_filp; | 
|  | 1328 | struct address_space *mapping = file->f_mapping; | 
|  | 1329 | struct inode *inode = mapping->host; | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1330 | ssize_t (*rw_op)(struct kiocb *, const struct iovec *, | 
|  | 1331 | unsigned long, loff_t); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | ssize_t ret = 0; | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1333 | unsigned short opcode; | 
|  | 1334 |  | 
|  | 1335 | if ((iocb->ki_opcode == IOCB_CMD_PREADV) || | 
|  | 1336 | (iocb->ki_opcode == IOCB_CMD_PREAD)) { | 
|  | 1337 | rw_op = file->f_op->aio_read; | 
|  | 1338 | opcode = IOCB_CMD_PREADV; | 
|  | 1339 | } else { | 
|  | 1340 | rw_op = file->f_op->aio_write; | 
|  | 1341 | opcode = IOCB_CMD_PWRITEV; | 
|  | 1342 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 |  | 
| Rusty Russell | c2ec668 | 2008-02-08 04:20:15 -0800 | [diff] [blame] | 1344 | /* This matches the pread()/pwrite() logic */ | 
|  | 1345 | if (iocb->ki_pos < 0) | 
|  | 1346 | return -EINVAL; | 
|  | 1347 |  | 
| Zach Brown | 897f15f | 2005-09-30 11:58:55 -0700 | [diff] [blame] | 1348 | do { | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1349 | ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], | 
|  | 1350 | iocb->ki_nr_segs - iocb->ki_cur_seg, | 
|  | 1351 | iocb->ki_pos); | 
|  | 1352 | if (ret > 0) | 
|  | 1353 | aio_advance_iovec(iocb, ret); | 
| Badari Pulavarty | 027445c | 2006-09-30 23:28:46 -0700 | [diff] [blame] | 1354 |  | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1355 | /* retry all partial writes.  retry partial reads as long as it's a | 
|  | 1356 | * regular file. */ | 
| Zach Brown | 353fb07 | 2005-09-30 11:58:56 -0700 | [diff] [blame] | 1357 | } while (ret > 0 && iocb->ki_left > 0 && | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1358 | (opcode == IOCB_CMD_PWRITEV || | 
|  | 1359 | (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)))); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 |  | 
|  | 1361 | /* This means we must have transferred all that we could */ | 
|  | 1362 | /* No need to retry anymore */ | 
|  | 1363 | if ((ret == 0) || (iocb->ki_left == 0)) | 
|  | 1364 | ret = iocb->ki_nbytes - iocb->ki_left; | 
|  | 1365 |  | 
| Rusty Russell | 7adfa2f | 2008-02-08 04:20:14 -0800 | [diff] [blame] | 1366 | /* If we managed to write some out we return that, rather than | 
|  | 1367 | * the eventual error. */ | 
|  | 1368 | if (opcode == IOCB_CMD_PWRITEV | 
|  | 1369 | && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY | 
|  | 1370 | && iocb->ki_nbytes - iocb->ki_left) | 
|  | 1371 | ret = iocb->ki_nbytes - iocb->ki_left; | 
|  | 1372 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1373 | return ret; | 
|  | 1374 | } | 
|  | 1375 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | static ssize_t aio_fdsync(struct kiocb *iocb) | 
|  | 1377 | { | 
|  | 1378 | struct file *file = iocb->ki_filp; | 
|  | 1379 | ssize_t ret = -EINVAL; | 
|  | 1380 |  | 
|  | 1381 | if (file->f_op->aio_fsync) | 
|  | 1382 | ret = file->f_op->aio_fsync(iocb, 1); | 
|  | 1383 | return ret; | 
|  | 1384 | } | 
|  | 1385 |  | 
|  | 1386 | static ssize_t aio_fsync(struct kiocb *iocb) | 
|  | 1387 | { | 
|  | 1388 | struct file *file = iocb->ki_filp; | 
|  | 1389 | ssize_t ret = -EINVAL; | 
|  | 1390 |  | 
|  | 1391 | if (file->f_op->aio_fsync) | 
|  | 1392 | ret = file->f_op->aio_fsync(iocb, 0); | 
|  | 1393 | return ret; | 
|  | 1394 | } | 
|  | 1395 |  | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1396 | static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1397 | { | 
|  | 1398 | ssize_t ret; | 
|  | 1399 |  | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1400 | #ifdef CONFIG_COMPAT | 
|  | 1401 | if (compat) | 
|  | 1402 | ret = compat_rw_copy_check_uvector(type, | 
|  | 1403 | (struct compat_iovec __user *)kiocb->ki_buf, | 
|  | 1404 | kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, | 
|  | 1405 | &kiocb->ki_iovec); | 
|  | 1406 | else | 
|  | 1407 | #endif | 
|  | 1408 | ret = rw_copy_check_uvector(type, | 
|  | 1409 | (struct iovec __user *)kiocb->ki_buf, | 
|  | 1410 | kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, | 
|  | 1411 | &kiocb->ki_iovec); | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1412 | if (ret < 0) | 
|  | 1413 | goto out; | 
|  | 1414 |  | 
|  | 1415 | kiocb->ki_nr_segs = kiocb->ki_nbytes; | 
|  | 1416 | kiocb->ki_cur_seg = 0; | 
|  | 1417 | /* ki_nbytes/left now reflect bytes instead of segs */ | 
|  | 1418 | kiocb->ki_nbytes = ret; | 
|  | 1419 | kiocb->ki_left = ret; | 
|  | 1420 |  | 
|  | 1421 | ret = 0; | 
|  | 1422 | out: | 
|  | 1423 | return ret; | 
|  | 1424 | } | 
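|  |  |  | 
|  |  | /* | 
|  |  |  * From user space, the vectored opcodes handled above expect aio_buf to | 
|  |  |  * point at an array of struct iovec and aio_nbytes to carry the segment | 
|  |  |  * count.  A hedged sketch (not part of this file): | 
|  |  |  */ | 
|  |  | #include <linux/aio_abi.h> | 
|  |  | #include <sys/uio.h> | 
|  |  | #include <string.h> | 
|  |  |  | 
|  |  | static void fill_preadv_iocb(struct iocb *cb, int fd, | 
|  |  | 			     const struct iovec *vec, unsigned long nr_segs) | 
|  |  | { | 
|  |  | 	memset(cb, 0, sizeof(*cb));	/* reserved fields must stay zero */ | 
|  |  | 	cb->aio_lio_opcode = IOCB_CMD_PREADV; | 
|  |  | 	cb->aio_fildes     = fd; | 
|  |  | 	cb->aio_buf        = (unsigned long)vec;	/* the iovec array */ | 
|  |  | 	cb->aio_nbytes     = nr_segs;			/* number of segments */ | 
|  |  | 	cb->aio_offset     = 0; | 
|  |  | } | 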
|  | 1425 |  | 
|  | 1426 | static ssize_t aio_setup_single_vector(struct kiocb *kiocb) | 
|  | 1427 | { | 
|  | 1428 | kiocb->ki_iovec = &kiocb->ki_inline_vec; | 
|  | 1429 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; | 
|  | 1430 | kiocb->ki_iovec->iov_len = kiocb->ki_left; | 
|  | 1431 | kiocb->ki_nr_segs = 1; | 
|  | 1432 | kiocb->ki_cur_seg = 0; | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1433 | return 0; | 
|  | 1434 | } | 
|  | 1435 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | /* | 
|  | 1437 | * aio_setup_iocb: | 
|  | 1438 | *	Performs the initial checks and aio retry method | 
|  | 1439 | *	setup for the kiocb at the time of io submission. | 
|  | 1440 | */ | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1441 | static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | { | 
|  | 1443 | struct file *file = kiocb->ki_filp; | 
|  | 1444 | ssize_t ret = 0; | 
|  | 1445 |  | 
|  | 1446 | switch (kiocb->ki_opcode) { | 
|  | 1447 | case IOCB_CMD_PREAD: | 
|  | 1448 | ret = -EBADF; | 
|  | 1449 | if (unlikely(!(file->f_mode & FMODE_READ))) | 
|  | 1450 | break; | 
|  | 1451 | ret = -EFAULT; | 
|  | 1452 | if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, | 
|  | 1453 | kiocb->ki_left))) | 
|  | 1454 | break; | 
| Kostik Belousov | 8766ce4 | 2005-10-23 12:57:13 -0700 | [diff] [blame] | 1455 | ret = security_file_permission(file, MAY_READ); | 
|  | 1456 | if (unlikely(ret)) | 
|  | 1457 | break; | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1458 | ret = aio_setup_single_vector(kiocb); | 
|  | 1459 | if (ret) | 
|  | 1460 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | ret = -EINVAL; | 
|  | 1462 | if (file->f_op->aio_read) | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1463 | kiocb->ki_retry = aio_rw_vect_retry; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1464 | break; | 
|  | 1465 | case IOCB_CMD_PWRITE: | 
|  | 1466 | ret = -EBADF; | 
|  | 1467 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | 
|  | 1468 | break; | 
|  | 1469 | ret = -EFAULT; | 
|  | 1470 | if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, | 
|  | 1471 | kiocb->ki_left))) | 
|  | 1472 | break; | 
| Kostik Belousov | 8766ce4 | 2005-10-23 12:57:13 -0700 | [diff] [blame] | 1473 | ret = security_file_permission(file, MAY_WRITE); | 
|  | 1474 | if (unlikely(ret)) | 
|  | 1475 | break; | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1476 | ret = aio_setup_single_vector(kiocb); | 
|  | 1477 | if (ret) | 
|  | 1478 | break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | ret = -EINVAL; | 
|  | 1480 | if (file->f_op->aio_write) | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1481 | kiocb->ki_retry = aio_rw_vect_retry; | 
|  | 1482 | break; | 
|  | 1483 | case IOCB_CMD_PREADV: | 
|  | 1484 | ret = -EBADF; | 
|  | 1485 | if (unlikely(!(file->f_mode & FMODE_READ))) | 
|  | 1486 | break; | 
|  | 1487 | ret = security_file_permission(file, MAY_READ); | 
|  | 1488 | if (unlikely(ret)) | 
|  | 1489 | break; | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1490 | ret = aio_setup_vectored_rw(READ, kiocb, compat); | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1491 | if (ret) | 
|  | 1492 | break; | 
|  | 1493 | ret = -EINVAL; | 
|  | 1494 | if (file->f_op->aio_read) | 
|  | 1495 | kiocb->ki_retry = aio_rw_vect_retry; | 
|  | 1496 | break; | 
|  | 1497 | case IOCB_CMD_PWRITEV: | 
|  | 1498 | ret = -EBADF; | 
|  | 1499 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | 
|  | 1500 | break; | 
|  | 1501 | ret = security_file_permission(file, MAY_WRITE); | 
|  | 1502 | if (unlikely(ret)) | 
|  | 1503 | break; | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1504 | ret = aio_setup_vectored_rw(WRITE, kiocb, compat); | 
| Badari Pulavarty | eed4e51 | 2006-09-30 23:28:49 -0700 | [diff] [blame] | 1505 | if (ret) | 
|  | 1506 | break; | 
|  | 1507 | ret = -EINVAL; | 
|  | 1508 | if (file->f_op->aio_write) | 
|  | 1509 | kiocb->ki_retry = aio_rw_vect_retry; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | break; | 
|  | 1511 | case IOCB_CMD_FDSYNC: | 
|  | 1512 | ret = -EINVAL; | 
|  | 1513 | if (file->f_op->aio_fsync) | 
|  | 1514 | kiocb->ki_retry = aio_fdsync; | 
|  | 1515 | break; | 
|  | 1516 | case IOCB_CMD_FSYNC: | 
|  | 1517 | ret = -EINVAL; | 
|  | 1518 | if (file->f_op->aio_fsync) | 
|  | 1519 | kiocb->ki_retry = aio_fsync; | 
|  | 1520 | break; | 
|  | 1521 | default: | 
|  | 1522 | dprintk("EINVAL: io_submit: no operation provided\n"); | 
|  | 1523 | ret = -EINVAL; | 
|  | 1524 | } | 
|  | 1525 |  | 
|  | 1526 | if (!kiocb->ki_retry) | 
|  | 1527 | return ret; | 
|  | 1528 |  | 
|  | 1529 | return 0; | 
|  | 1530 | } | 
|  | 1531 |  | 
| Jeff Moyer | cfb1e33 | 2009-10-02 18:57:36 -0400 | [diff] [blame] | 1532 | static void aio_batch_add(struct address_space *mapping, | 
|  | 1533 | struct hlist_head *batch_hash) | 
|  | 1534 | { | 
|  | 1535 | struct aio_batch_entry *abe; | 
|  | 1536 | struct hlist_node *pos; | 
|  | 1537 | unsigned bucket; | 
|  | 1538 |  | 
|  | 1539 | bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS); | 
|  | 1540 | hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) { | 
|  | 1541 | if (abe->mapping == mapping) | 
|  | 1542 | return; | 
|  | 1543 | } | 
|  | 1544 |  | 
|  | 1545 | abe = mempool_alloc(abe_pool, GFP_KERNEL); | 
| Chris Mason | 306fb09 | 2010-08-23 10:47:55 -0400 | [diff] [blame] | 1546 |  | 
|  | 1547 | /* | 
|  | 1548 | * we should be using igrab here, but | 
|  | 1549 | * we don't want to hammer on the global | 
|  | 1550 | * inode spinlock just to take an extra | 
|  | 1551 | * reference on a file that we must already | 
|  | 1552 | * have a reference to. | 
|  | 1553 | * | 
|  | 1554 | * When we're called, we always have a reference | 
|  | 1555 | * on the file, so we must always have a reference | 
| Al Viro | 7de9c6e | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 1556 | * on the inode, so ihold() is safe here. | 
| Chris Mason | 306fb09 | 2010-08-23 10:47:55 -0400 | [diff] [blame] | 1557 | */ | 
| Al Viro | 7de9c6e | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 1558 | ihold(mapping->host); | 
| Jeff Moyer | cfb1e33 | 2009-10-02 18:57:36 -0400 | [diff] [blame] | 1559 | abe->mapping = mapping; | 
|  | 1560 | hlist_add_head(&abe->list, &batch_hash[bucket]); | 
|  | 1561 | return; | 
|  | 1562 | } | 
|  | 1563 |  | 
|  | 1564 | static void aio_batch_free(struct hlist_head *batch_hash) | 
|  | 1565 | { | 
|  | 1566 | struct aio_batch_entry *abe; | 
|  | 1567 | struct hlist_node *pos, *n; | 
|  | 1568 | int i; | 
|  | 1569 |  | 
|  | 1570 | for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) { | 
|  | 1571 | hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) { | 
|  | 1572 | blk_run_address_space(abe->mapping); | 
|  | 1573 | iput(abe->mapping->host); | 
|  | 1574 | hlist_del(&abe->list); | 
|  | 1575 | mempool_free(abe, abe_pool); | 
|  | 1576 | } | 
|  | 1577 | } | 
|  | 1578 | } | 
|  | 1579 |  | 
| Adrian Bunk | d5470b5 | 2008-04-29 00:58:57 -0700 | [diff] [blame] | 1580 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1581 | struct iocb *iocb, struct hlist_head *batch_hash, | 
|  | 1582 | bool compat) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1583 | { | 
|  | 1584 | struct kiocb *req; | 
|  | 1585 | struct file *file; | 
|  | 1586 | ssize_t ret; | 
|  | 1587 |  | 
|  | 1588 | /* enforce forwards compatibility on users */ | 
| Davide Libenzi | 9c3060b | 2007-05-10 22:23:21 -0700 | [diff] [blame] | 1589 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | pr_debug("EINVAL: io_submit: reserve field set\n"); | 
|  | 1591 | return -EINVAL; | 
|  | 1592 | } | 
|  | 1593 |  | 
|  | 1594 | /* prevent overflows */ | 
|  | 1595 | if (unlikely( | 
|  | 1596 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | 
|  | 1597 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | 
|  | 1598 | ((ssize_t)iocb->aio_nbytes < 0) | 
|  | 1599 | )) { | 
|  | 1600 | pr_debug("EINVAL: io_submit: overflow check\n"); | 
|  | 1601 | return -EINVAL; | 
|  | 1602 | } | 
|  | 1603 |  | 
|  | 1604 | file = fget(iocb->aio_fildes); | 
|  | 1605 | if (unlikely(!file)) | 
|  | 1606 | return -EBADF; | 
|  | 1607 |  | 
|  | 1608 | req = aio_get_req(ctx);		/* returns with 2 references to req */ | 
|  | 1609 | if (unlikely(!req)) { | 
|  | 1610 | fput(file); | 
|  | 1611 | return -EAGAIN; | 
|  | 1612 | } | 
| Yan Zheng | 87e2831 | 2007-10-08 12:16:20 -0700 | [diff] [blame] | 1613 | req->ki_filp = file; | 
| Davide Libenzi | 9c3060b | 2007-05-10 22:23:21 -0700 | [diff] [blame] | 1614 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { | 
|  | 1615 | /* | 
|  | 1616 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | 
|  | 1617 | * instance of the file* now. The file descriptor must be | 
|  | 1618 | * an eventfd() fd, and will be signaled for each completed | 
|  | 1619 | * event using the eventfd_signal() function. | 
|  | 1620 | */ | 
| Davide Libenzi | 1338901 | 2009-06-30 11:41:11 -0700 | [diff] [blame] | 1621 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); | 
| Hirofumi Nakagawa | 801678c | 2008-04-29 01:03:09 -0700 | [diff] [blame] | 1622 | if (IS_ERR(req->ki_eventfd)) { | 
| Davide Libenzi | 9c3060b | 2007-05-10 22:23:21 -0700 | [diff] [blame] | 1623 | ret = PTR_ERR(req->ki_eventfd); | 
| Davide Libenzi | 87c3a86 | 2009-03-18 17:04:19 -0700 | [diff] [blame] | 1624 | req->ki_eventfd = NULL; | 
| Davide Libenzi | 9c3060b | 2007-05-10 22:23:21 -0700 | [diff] [blame] | 1625 | goto out_put_req; | 
|  | 1626 | } | 
|  | 1627 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1628 |  | 
| Ken Chen | 212079c | 2005-05-01 08:59:15 -0700 | [diff] [blame] | 1629 | ret = put_user(req->ki_key, &user_iocb->aio_key); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | if (unlikely(ret)) { | 
|  | 1631 | dprintk("EFAULT: aio_key\n"); | 
|  | 1632 | goto out_put_req; | 
|  | 1633 | } | 
|  | 1634 |  | 
|  | 1635 | req->ki_obj.user = user_iocb; | 
|  | 1636 | req->ki_user_data = iocb->aio_data; | 
|  | 1637 | req->ki_pos = iocb->aio_offset; | 
|  | 1638 |  | 
|  | 1639 | req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; | 
|  | 1640 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; | 
|  | 1641 | req->ki_opcode = iocb->aio_lio_opcode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 |  | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1643 | ret = aio_setup_iocb(req, compat); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 |  | 
|  | 1645 | if (ret) | 
|  | 1646 | goto out_put_req; | 
|  | 1647 |  | 
|  | 1648 | spin_lock_irq(&ctx->ctx_lock); | 
| Benjamin LaHaise | ac0b1bc | 2005-09-09 13:02:09 -0700 | [diff] [blame] | 1649 | aio_run_iocb(req); | 
| Benjamin LaHaise | ac0b1bc | 2005-09-09 13:02:09 -0700 | [diff] [blame] | 1650 | if (!list_empty(&ctx->run_list)) { | 
| Ken Chen | 954d3e9 | 2005-05-01 08:59:16 -0700 | [diff] [blame] | 1651 | /* drain the run list */ | 
|  | 1652 | while (__aio_run_iocbs(ctx)) | 
|  | 1653 | ; | 
|  | 1654 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | spin_unlock_irq(&ctx->ctx_lock); | 
| Jeff Moyer | cfb1e33 | 2009-10-02 18:57:36 -0400 | [diff] [blame] | 1656 | if (req->ki_opcode == IOCB_CMD_PREAD || | 
|  | 1657 | req->ki_opcode == IOCB_CMD_PREADV || | 
|  | 1658 | req->ki_opcode == IOCB_CMD_PWRITE || | 
|  | 1659 | req->ki_opcode == IOCB_CMD_PWRITEV) | 
|  | 1660 | aio_batch_add(file->f_mapping, batch_hash); | 
|  | 1661 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 | aio_put_req(req);	/* drop extra ref to req */ | 
|  | 1663 | return 0; | 
|  | 1664 |  | 
|  | 1665 | out_put_req: | 
|  | 1666 | aio_put_req(req);	/* drop extra ref to req */ | 
|  | 1667 | aio_put_req(req);	/* drop i/o ref to req */ | 
|  | 1668 | return ret; | 
|  | 1669 | } | 
|  | 1670 |  | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1671 | long do_io_submit(aio_context_t ctx_id, long nr, | 
|  | 1672 | struct iocb __user *__user *iocbpp, bool compat) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 | { | 
|  | 1674 | struct kioctx *ctx; | 
|  | 1675 | long ret = 0; | 
|  | 1676 | int i; | 
| Jeff Moyer | cfb1e33 | 2009-10-02 18:57:36 -0400 | [diff] [blame] | 1677 | struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, }; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 |  | 
|  | 1679 | if (unlikely(nr < 0)) | 
|  | 1680 | return -EINVAL; | 
|  | 1681 |  | 
| Jeff Moyer | 75e1c70 | 2010-09-10 14:16:00 -0700 | [diff] [blame] | 1682 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) | 
|  | 1683 | nr = LONG_MAX/sizeof(*iocbpp); | 
|  | 1684 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1685 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) | 
|  | 1686 | return -EFAULT; | 
|  | 1687 |  | 
|  | 1688 | ctx = lookup_ioctx(ctx_id); | 
|  | 1689 | if (unlikely(!ctx)) { | 
|  | 1690 | pr_debug("EINVAL: io_submit: invalid context id\n"); | 
|  | 1691 | return -EINVAL; | 
|  | 1692 | } | 
|  | 1693 |  | 
|  | 1694 | /* | 
|  | 1695 | * AKPM: should this return a partial result if some of the IOs were | 
|  | 1696 | * successfully submitted? | 
|  | 1697 | */ | 
|  | 1698 | for (i=0; i<nr; i++) { | 
|  | 1699 | struct iocb __user *user_iocb; | 
|  | 1700 | struct iocb tmp; | 
|  | 1701 |  | 
|  | 1702 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | 
|  | 1703 | ret = -EFAULT; | 
|  | 1704 | break; | 
|  | 1705 | } | 
|  | 1706 |  | 
|  | 1707 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | 
|  | 1708 | ret = -EFAULT; | 
|  | 1709 | break; | 
|  | 1710 | } | 
|  | 1711 |  | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1712 | ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1713 | if (ret) | 
|  | 1714 | break; | 
|  | 1715 | } | 
| Jeff Moyer | cfb1e33 | 2009-10-02 18:57:36 -0400 | [diff] [blame] | 1716 | aio_batch_free(batch_hash); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 |  | 
|  | 1718 | put_ioctx(ctx); | 
|  | 1719 | return i ? i : ret; | 
|  | 1720 | } | 
|  | 1721 |  | 
| Jeff Moyer | 9d85cba | 2010-05-26 14:44:26 -0700 | [diff] [blame] | 1722 | /* sys_io_submit: | 
|  | 1723 | *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns | 
|  | 1724 | *	the number of iocbs queued.  May return -EINVAL if the aio_context | 
|  | 1725 | *	specified by ctx_id is invalid, if nr is < 0, if the iocb at | 
|  | 1726 | *	*iocbpp[0] is not properly initialized, if the operation specified | 
|  | 1727 | *	is invalid for the file descriptor in the iocb.  May fail with | 
|  | 1728 | *	-EFAULT if any of the data structures point to invalid data.  May | 
|  | 1729 | *	fail with -EBADF if the file descriptor specified in the first | 
|  | 1730 | *	iocb is invalid.  May fail with -EAGAIN if insufficient resources | 
|  | 1731 | *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will | 
|  | 1732 | *	fail with -ENOSYS if not implemented. | 
|  | 1733 | */ | 
|  | 1734 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, | 
|  | 1735 | struct iocb __user * __user *, iocbpp) | 
|  | 1736 | { | 
|  | 1737 | return do_io_submit(ctx_id, nr, iocbpp, 0); | 
|  | 1738 | } | 
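|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal user-space sketch (not part of this file) queueing a single | 
|  |  |  * IOCB_CMD_PREAD through the raw io_submit() syscall documented above. | 
|  |  |  * The context is assumed to have been created with io_setup(). | 
|  |  |  */ | 
|  |  | #include <linux/aio_abi.h> | 
|  |  | #include <sys/syscall.h> | 
|  |  | #include <unistd.h> | 
|  |  | #include <string.h> | 
|  |  |  | 
|  |  | static long submit_one_read(aio_context_t ctx, int fd, void *buf, size_t len) | 
|  |  | { | 
|  |  | 	struct iocb cb; | 
|  |  | 	struct iocb *cbs[1] = { &cb }; | 
|  |  |  | 
|  |  | 	memset(&cb, 0, sizeof(cb));	/* reserved fields must stay zero */ | 
|  |  | 	cb.aio_lio_opcode = IOCB_CMD_PREAD; | 
|  |  | 	cb.aio_fildes     = fd; | 
|  |  | 	cb.aio_buf        = (unsigned long)buf; | 
|  |  | 	cb.aio_nbytes     = len; | 
|  |  | 	cb.aio_offset     = 0; | 
|  |  |  | 
|  |  | 	/* returns the number of iocbs queued (1 here) or -1 with errno set */ | 
|  |  | 	return syscall(__NR_io_submit, ctx, 1L, cbs); | 
|  |  | } | 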
|  | 1739 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | /* lookup_kiocb | 
|  | 1741 | *	Finds a given iocb for cancellation. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1742 | */ | 
| Adrian Bunk | 25ee7e3 | 2005-04-25 08:18:14 -0700 | [diff] [blame] | 1743 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, | 
|  | 1744 | u32 key) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | { | 
|  | 1746 | struct list_head *pos; | 
| Zach Brown | d00689a | 2005-11-13 16:07:34 -0800 | [diff] [blame] | 1747 |  | 
|  | 1748 | assert_spin_locked(&ctx->ctx_lock); | 
|  | 1749 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | /* TODO: use a hash or array, this sucks. */ | 
|  | 1751 | list_for_each(pos, &ctx->active_reqs) { | 
|  | 1752 | struct kiocb *kiocb = list_kiocb(pos); | 
|  | 1753 | if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key) | 
|  | 1754 | return kiocb; | 
|  | 1755 | } | 
|  | 1756 | return NULL; | 
|  | 1757 | } | 
|  | 1758 |  | 
|  | 1759 | /* sys_io_cancel: | 
|  | 1760 | *	Attempts to cancel an iocb previously passed to io_submit.  If | 
|  | 1761 | *	the operation is successfully cancelled, the resulting event is | 
|  | 1762 | *	copied into the memory pointed to by result without being placed | 
|  | 1763 | *	into the completion queue and 0 is returned.  May fail with | 
|  | 1764 | *	-EFAULT if any of the data structures pointed to are invalid. | 
|  | 1765 | *	May fail with -EINVAL if aio_context specified by ctx_id is | 
|  | 1766 | *	invalid.  May fail with -EAGAIN if the iocb specified was not | 
|  | 1767 | *	cancelled.  Will fail with -ENOSYS if not implemented. | 
|  | 1768 | */ | 
| Heiko Carstens | 002c897 | 2009-01-14 14:14:18 +0100 | [diff] [blame] | 1769 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | 
|  | 1770 | struct io_event __user *, result) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1771 | { | 
|  | 1772 | int (*cancel)(struct kiocb *iocb, struct io_event *res); | 
|  | 1773 | struct kioctx *ctx; | 
|  | 1774 | struct kiocb *kiocb; | 
|  | 1775 | u32 key; | 
|  | 1776 | int ret; | 
|  | 1777 |  | 
|  | 1778 | ret = get_user(key, &iocb->aio_key); | 
|  | 1779 | if (unlikely(ret)) | 
|  | 1780 | return -EFAULT; | 
|  | 1781 |  | 
|  | 1782 | ctx = lookup_ioctx(ctx_id); | 
|  | 1783 | if (unlikely(!ctx)) | 
|  | 1784 | return -EINVAL; | 
|  | 1785 |  | 
|  | 1786 | spin_lock_irq(&ctx->ctx_lock); | 
|  | 1787 | ret = -EAGAIN; | 
|  | 1788 | kiocb = lookup_kiocb(ctx, iocb, key); | 
|  | 1789 | if (kiocb && kiocb->ki_cancel) { | 
|  | 1790 | cancel = kiocb->ki_cancel; | 
|  | 1791 | kiocb->ki_users++; | 
|  | 1792 | kiocbSetCancelled(kiocb); | 
|  | 1793 | } else | 
|  | 1794 | cancel = NULL; | 
|  | 1795 | spin_unlock_irq(&ctx->ctx_lock); | 
|  | 1796 |  | 
|  | 1797 | if (NULL != cancel) { | 
|  | 1798 | struct io_event tmp; | 
|  | 1799 | pr_debug("calling cancel\n"); | 
|  | 1800 | memset(&tmp, 0, sizeof(tmp)); | 
|  | 1801 | tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; | 
|  | 1802 | tmp.data = kiocb->ki_user_data; | 
|  | 1803 | ret = cancel(kiocb, &tmp); | 
|  | 1804 | if (!ret) { | 
|  | 1805 | /* Cancellation succeeded -- copy the result | 
|  | 1806 | * into the user's buffer. | 
|  | 1807 | */ | 
|  | 1808 | if (copy_to_user(result, &tmp, sizeof(tmp))) | 
|  | 1809 | ret = -EFAULT; | 
|  | 1810 | } | 
|  | 1811 | } else | 
| Wendy Cheng | 8f58202 | 2005-09-09 13:02:08 -0700 | [diff] [blame] | 1812 | ret = -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1813 |  | 
|  | 1814 | put_ioctx(ctx); | 
|  | 1815 |  | 
|  | 1816 | return ret; | 
|  | 1817 | } | 
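|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal user-space sketch (not part of this file) of attempting to | 
|  |  |  * cancel a previously submitted request via the raw io_cancel() syscall | 
|  |  |  * documented above.  "cb" must be the same iocb that was handed to | 
|  |  |  * io_submit(); on success the completion is copied into "ev". | 
|  |  |  */ | 
|  |  | #include <linux/aio_abi.h> | 
|  |  | #include <sys/syscall.h> | 
|  |  | #include <unistd.h> | 
|  |  |  | 
|  |  | static long cancel_one(aio_context_t ctx, struct iocb *cb) | 
|  |  | { | 
|  |  | 	struct io_event ev; | 
|  |  |  | 
|  |  | 	/* 0 on successful cancellation, otherwise -1 with errno set | 
|  |  | 	 * (commonly EAGAIN when the request could not be cancelled) */ | 
|  |  | 	return syscall(__NR_io_cancel, ctx, cb, &ev); | 
|  |  | } | 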
|  | 1818 |  | 
|  | 1819 | /* io_getevents: | 
|  | 1820 | *	Attempts to read at least min_nr events and up to nr events from | 
| Satoru Takeuchi | 642b512 | 2010-08-05 11:23:11 -0700 | [diff] [blame] | 1821 | *	the completion queue for the aio_context specified by ctx_id. If | 
|  | 1822 | *	it succeeds, the number of read events is returned. May fail with | 
|  | 1823 | *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is | 
|  | 1824 | *	out of range, if timeout is out of range.  May fail with -EFAULT | 
|  | 1825 | *	if any of the memory specified is invalid.  May return 0 or | 
|  | 1826 | *	< min_nr if the timeout specified by timeout has elapsed | 
|  | 1827 | *	before sufficient events are available, where timeout == NULL | 
|  | 1828 | *	specifies an infinite timeout. Note that the timeout pointed to by | 
|  | 1829 | *	timeout is relative and will be updated if not NULL and the | 
|  | 1830 | *	operation blocks. Will fail with -ENOSYS if not implemented. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1831 | */ | 
| Heiko Carstens | 002c897 | 2009-01-14 14:14:18 +0100 | [diff] [blame] | 1832 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, | 
|  | 1833 | long, min_nr, | 
|  | 1834 | long, nr, | 
|  | 1835 | struct io_event __user *, events, | 
|  | 1836 | struct timespec __user *, timeout) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 | { | 
|  | 1838 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | 
|  | 1839 | long ret = -EINVAL; | 
|  | 1840 |  | 
|  | 1841 | if (likely(ioctx)) { | 
|  | 1842 | if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0)) | 
|  | 1843 | ret = read_events(ioctx, min_nr, nr, events, timeout); | 
|  | 1844 | put_ioctx(ioctx); | 
|  | 1845 | } | 
|  | 1846 |  | 
| Roland McGrath | 598af05 | 2008-04-10 15:38:45 -0700 | [diff] [blame] | 1847 | asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | return ret; | 
|  | 1849 | } |
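|  |  |  | 
|  |  | /* | 
|  |  |  * A minimal user-space sketch (not part of this file) reaping | 
|  |  |  * completions via the raw io_getevents() syscall documented above.  It | 
|  |  |  * waits up to one second for at least one event; the batch size of 32 | 
|  |  |  * is arbitrary. | 
|  |  |  */ | 
|  |  | #include <linux/aio_abi.h> | 
|  |  | #include <sys/syscall.h> | 
|  |  | #include <unistd.h> | 
|  |  | #include <time.h> | 
|  |  |  | 
|  |  | static long reap_some(aio_context_t ctx) | 
|  |  | { | 
|  |  | 	struct io_event events[32]; | 
|  |  | 	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 }; | 
|  |  |  | 
|  |  | 	/* returns the number of events read (possibly 0 on timeout), | 
|  |  | 	 * or -1 with errno set */ | 
|  |  | 	return syscall(__NR_io_getevents, ctx, 1L, 32L, events, &ts); | 
|  |  | } | 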