/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	aio_wq = create_workqueue("aio");

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
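	/*
	 * Illustrative sizing example (not from the original source;
	 * assumes 4 KiB pages with 32-byte struct aio_ring and struct
	 * io_event): a request for 128 events becomes 130 above, so
	 * size = 32 + 130 * 32 = 4192 bytes -> nr_pages = 2, and
	 * nr_events is then rounded up to (2 * 4096 - 32) / 32 = 255.
	 */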

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		printk("mmap err: %ld\n", -info->mmap_base);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
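
/*
 * Worked example of the mapping above (illustrative only, assuming 4 KiB
 * pages and 32-byte entries): AIO_EVENTS_PER_PAGE = 128 and
 * AIO_EVENTS_FIRST_PAGE = 127, so AIO_EVENTS_OFFSET = 1.  Event 126 then
 * maps to pos = 127, the last slot of ring page 0 (whose slot 0 holds the
 * aio_ring header), while event 127 maps to pos = 128, slot 0 of page 1.
 */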

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("EINVAL: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	memset(ctx, 0, sizeof(*ctx));
	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr)
		ctx->max_reqs = 0;
	else
		aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);
	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list.  kludge.  FIXME */
	write_lock(&mm->ioctx_list_lock);
	ctx->next = mm->ioctx_list;
	mm->ioctx_list = ctx;
	write_unlock(&mm->ioctx_list_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	if (!ctx->reqs_active)
		return;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void fastcall exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx = mm->ioctx_list;
	mm->ioctx_list = NULL;
	while (ctx) {
		struct kioctx *next = ctx->next;
		ctx->next = NULL;
		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * this is an overkill, but ensures we don't leave
		 * the ctx on the aio_wq
		 */
		flush_workqueue(aio_wq);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
		ctx = next;
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
void fastcall __put_ioctx(struct kioctx *ctx)
{
	unsigned nr_events = ctx->max_reqs;

	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	flush_workqueue(aio_wq);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		get_ioctx(ctx);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput */
		__fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
		req, atomic_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/* Must be done under the lock to serialise against cancellation.
	 * Call this aio_fput as it duplicates fput via the fput_work.
	 */
	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else
		really_put_req(ctx, req);
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int fastcall aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	if (ret)
		put_ioctx(ctx);
	return ret;
}

/*	Lookup an ioctx id.  ioctx_list is lockless for reads.
 *	FIXME: this is O(n) and is only suitable for development.
 */
struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct kioctx *ioctx;
	struct mm_struct *mm;

	mm = current->mm;
	read_lock(&mm->ioctx_list_lock);
	for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
		if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
			get_ioctx(ioctx);
			break;
		}
	read_unlock(&mm->ioctx_list_lock);

	return ioctx;
}

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	Called by the retry thread to execute retries within the
 *	iocb issuer's mm context, so that copy_from/to_user
 *	operations work seamlessly for aio.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->flags |= PF_BORROWED_MM;
	active_mm = tsk->active_mm;
	atomic_inc(&mm->mm_count);
	tsk->mm = mm;
	tsk->active_mm = mm;
	/*
	 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
	 * it won't work. Update it accordingly if you change it here
	 */
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	mmdrop(active_mm);
}

/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
static void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->flags &= ~PF_BORROWED_MM;
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
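
/*
 * The borrow pattern the two helpers above implement, as used by
 * aio_kick_handler() further below (illustrative summary only):
 *
 *	use_mm(ctx->mm);
 *	... retries may copy_from_user()/copy_to_user() against the
 *	    issuer's address space ...
 *	unuse_mm(ctx->mm);
 */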

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context. By setting this thread's io_wait context
	 * to point to the wait queue entry inside the currently
	 * running iocb for the duration of the retry, we ensure
	 * that async notification wakeups are queued by the
	 * operation instead of blocking waits, and when notified,
	 * cause the iocb to be kicked for continuation (through
	 * the aio_wake_function callback).
	 */
	BUG_ON(current->io_wait != NULL);
	current->io_wait = &iocb->ki_wait;
	ret = retry(iocb);
	current->io_wait = NULL;

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		BUG_ON(!list_empty(&iocb->ki_wait.task_list));
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
			ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;       /* grab extra reference */
		aio_run_iocb(iocb);
		if (__aio_put_req(ctx, iocb))  /* drop extra ref */
			put_ioctx(ctx);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}


/*
 * aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
	int requeue;

	spin_lock_irq(&ctx->ctx_lock);

	requeue = __aio_run_iocbs(ctx);
	spin_unlock_irq(&ctx->ctx_lock);
	if (requeue)
		aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	/* We're supposed to be the only path putting the iocb back on the run
	 * list.  If we find that the iocb is *back* on a wait queue already
	 * then retry has happened before we could queue the iocb.  This also
	 * means that the retry could have completed and freed our iocb, no
	 * good. */
	BUG_ON(!list_empty(&iocb->ki_wait.task_list));

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *      Called typically from a wait queue callback context
 *      (aio_wake_function) to trigger a retry of the iocb.
 *      The retry is usually executed by aio workqueue
 *      threads (See aio_kick_handler).
 */
void fastcall kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;
	int		ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	if (ret)
		put_ioctx(ctx);

	return ret;
}

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	init_timer(&to->timer);
	to->timer.data = (unsigned long)to;
	to->timer.function = timeout_func;
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			ret = 0;
			if (to.timed_out)	/* Only check after read evt */
				break;
			schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	struct kioctx **tmp;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	write_lock(&mm->ioctx_list_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
	     tmp = &(*tmp)->next)
		;
	if (*tmp)
		*tmp = ioctx->next;
	write_unlock(&mm->ioctx_list_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or
 | 1234 |  *	if the specified nr_events exceeds internal limits.  May fail  | 
 | 1235 |  *	with -EAGAIN if the specified nr_events exceeds the user's limit  | 
 | 1236 |  *	of available events.  May fail with -ENOMEM if insufficient kernel | 
 | 1237 |  *	resources are available.  May fail with -EFAULT if an invalid | 
 | 1238 |  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not | 
 | 1239 |  *	implemented. | 
 | 1240 |  */ | 
 | 1241 | asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp) | 
 | 1242 | { | 
 | 1243 | 	struct kioctx *ioctx = NULL; | 
 | 1244 | 	unsigned long ctx; | 
 | 1245 | 	long ret; | 
 | 1246 |  | 
 | 1247 | 	ret = get_user(ctx, ctxp); | 
 | 1248 | 	if (unlikely(ret)) | 
 | 1249 | 		goto out; | 
 | 1250 |  | 
 | 1251 | 	ret = -EINVAL; | 
| Zach Brown | d55b5fd | 2005-11-07 00:59:31 -0800 | [diff] [blame] | 1252 | 	if (unlikely(ctx || nr_events == 0)) { | 
 | 1253 | 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", | 
 | 1254 | 		         ctx, nr_events); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1255 | 		goto out; | 
 | 1256 | 	} | 
 | 1257 |  | 
 | 1258 | 	ioctx = ioctx_alloc(nr_events); | 
 | 1259 | 	ret = PTR_ERR(ioctx); | 
 | 1260 | 	if (!IS_ERR(ioctx)) { | 
 | 1261 | 		ret = put_user(ioctx->user_id, ctxp); | 
 | 1262 | 		if (!ret) | 
 | 1263 | 			return 0; | 
 | 1264 |  | 
 | 1265 | 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */ | 
 | 1266 | 		io_destroy(ioctx); | 
 | 1267 | 	} | 
 | 1268 |  | 
 | 1269 | out: | 
 | 1270 | 	return ret; | 
 | 1271 | } | 

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EFAULT if the context pointed to
 *	is invalid.
 */
asmlinkage long sys_io_destroy(aio_context_t ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}
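
/*
 * Userspace counterpart to the sketch after sys_io_setup(): once the
 * context is no longer needed it is torn down again (illustrative,
 * raw syscall):
 *
 *	if (syscall(__NR_io_destroy, ctx) < 0)
 *		perror("io_destroy");
 */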

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}
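
/*
 * Worked example of the bookkeeping above: with two 100-byte segments
 * and ret == 150, the loop consumes all of segment 0 (iov_len 100 -> 0,
 * ki_cur_seg 0 -> 1), then advances segment 1 by the remaining 50 bytes
 * (iov_base += 50, iov_len 100 -> 50), leaving ki_left lower by 150.
 */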

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could;
	 * no need to retry anymore. */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
{
	ssize_t ret;

	ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
				    kiocb->ki_nbytes, 1,
				    &kiocb->ki_inline_vec, &kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}
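
/*
 * On the PREADV/PWRITEV path the userspace iocb carries an iovec array
 * rather than a flat buffer: aio_buf points at the array and aio_nbytes
 * holds the segment count, which the helper above then converts into a
 * byte count.  Illustrative sketch ("fd", "hdr" and "body" are
 * hypothetical):
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
 *		{ .iov_base = body, .iov_len = sizeof(body) },
 *	};
 *	struct iocb cb = { .aio_lio_opcode = IOCB_CMD_PREADV };
 *
 *	cb.aio_fildes = fd;
 *	cb.aio_buf    = (__u64)(unsigned long)iov;
 *	cb.aio_nbytes = 2;	// number of segments, not bytes
 */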

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}
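
/*
 * For reference, the userspace iocb that lands in the IOCB_CMD_PREAD
 * arm above is filled in roughly like this (illustrative sketch; "fd"
 * and "buf" are hypothetical, and the reserved fields must stay zero
 * to pass the checks in io_submit_one()):
 *
 *	char buf[4096];
 *	struct iocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;		// must be open for reading
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = sizeof(buf);	// bytes for PREAD/PWRITE
 *	cb.aio_offset     = 0;
 */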

/*
 * aio_wake_function:
 *	wait queue callback function for aio notification.
 *	Simply triggers a retry of the operation via kick_iocb.
 *
 *	This callback is specified in the wait queue entry in
 *	a kiocb (current->io_wait points to this wait queue
 *	entry when an aio operation executes; it is used
 *	instead of a synchronous wait when an i/o blocking
 *	condition is encountered during aio).
 *
 * Note:
 * This routine is executed with the wait queue lock held.
 * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
 * the ioctx lock inside the wait queue lock.  This is safe
 * because this callback isn't used for wait queues which
 * are nested inside the ioctx lock (i.e. ctx->wait).
 */
static int aio_wake_function(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

	list_del_init(&wait->task_list);
	kick_iocb(iocb);
	return 1;
}

int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 ||
		     iocb->aio_reserved3)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx);		/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}

	req->ki_filp = file;
	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;
	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
	INIT_LIST_HEAD(&req->ki_wait.task_list);

	ret = aio_setup_iocb(req);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
			      struct iocb __user * __user *iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp);
		if (ret)
			break;
	}

	put_ioctx(ctx);
	return i ? i : ret;
}
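
/*
 * Illustrative userspace sketch of the interface above (raw syscall;
 * "cb" is an iocb prepared as in the IOCB_CMD_PREAD example earlier):
 *
 *	struct iocb *cbs[1] = { &cb };
 *	long n = syscall(__NR_io_submit, ctx, 1, cbs);
 *
 * On success n is the number of iocbs queued (here 1); per the
 * partial-result behaviour above, an error is returned only when the
 * very first iocb fails.
 */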

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if the aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
			      struct io_event __user *result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}
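
/*
 * Illustrative userspace sketch (raw syscall): attempting to cancel
 * the iocb submitted earlier and, on success, receiving its completion
 * event directly in "ev" instead of via io_getevents():
 *
 *	struct io_event ev;
 *
 *	if (syscall(__NR_io_cancel, ctx, &cb, &ev) < 0)
 *		perror("io_cancel");	// e.g. EINVAL/EAGAIN: not cancellable
 */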

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  May
 *	fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
 *	if nr is out of range, or if when is out of range.  May fail with
 *	-EFAULT if any of the memory specified is invalid.  May return
 *	0 or < min_nr if no events are available and the timeout specified
 *	by when has elapsed, where when == NULL specifies an infinite
 *	timeout.  Note that the timeout pointed to by when is relative and
 *	will be updated if not NULL and the operation blocks.  Will fail
 *	with -ENOSYS if not implemented.
 */
asmlinkage long sys_io_getevents(aio_context_t ctx_id,
				 long min_nr,
				 long nr,
				 struct io_event __user *events,
				 struct timespec __user *timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	return ret;
}
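
/*
 * Illustrative userspace sketch (raw syscall): reaping completions
 * with a one-second relative timeout, per the semantics above:
 *
 *	struct io_event evs[8];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	long n = syscall(__NR_io_getevents, ctx, 1, 8, evs, &ts);
 *
 * n is the number of events reaped (possibly 0 on timeout); ts may be
 * updated if the call blocked.
 */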

__initcall(aio_setup);

EXPORT_SYMBOL(aio_complete);
EXPORT_SYMBOL(aio_put_req);
EXPORT_SYMBOL(wait_on_sync_kiocb);