/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 *		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <asm/atomic.h>

/*
 * How many user pages to map in one call to get_user_pages().  This
 * determines the size of struct dio's pages[] array (the dio is allocated
 * with kmalloc(), not on the stack).
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
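/*
 * For example (illustrative numbers): a 512-byte-aligned request (blkbits ==
 * 9) against a filesystem with 4096-byte blocks (i_blkbits == 12) runs with
 * blkfactor == 3: a dio_block number converts to an fs_block number by
 * shifting right by 3, and one fs block spans 1 << 3 == 8 dio_blocks.
 */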

struct dio {
	/* BIO submission state */
	struct bio *bio;		/* bio under assembly */
	struct inode *inode;
	int rw;
	loff_t i_size;			/* i_size when submitted */
	int flags;			/* doesn't change */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	size_t	size;			/* total request size (doesn't change)*/
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change, used only once */
	int boundary;			/* prev block is at a boundary */
	int reap_counter;		/* rate limit reaping */
	get_block_t *get_block;		/* block mapping function */
	dio_iodone_t *end_io;		/* IO completion function */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */
	struct buffer_head map_bh;	/* last get_block() result */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
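	/*
	 * refcount starts at 1 (held by the submitting task in
	 * direct_io_worker()) and is incremented once per bio in flight.
	 * A completion path that drops it to 1 knows that only the submitter
	 * remains and wakes it; whoever drops it to 0 completes the dio.
	 */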
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	int is_async;			/* is IO async ? */
	int io_error;			/* IO error in completion path */
	ssize_t result;                 /* IO result */

	/*
	 * Page fetching state. These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	int page_errors;		/* errno from get_user_pages() */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
};

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio *dio)
{
	return dio->tail - dio->head;
}

/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */
static int dio_refill_pages(struct dio *dio)
{
	int ret;
	int nr_pages;

	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
	ret = get_user_pages_fast(
		dio->curr_user_address,		/* Where from? */
		nr_pages,			/* How many pages? */
		dio->rw == READ,		/* Write to memory? */
		&dio->pages[0]);		/* Put results here */

	if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		page_cache_get(page);
		dio->pages[0] = page;
		dio->head = 0;
		dio->tail = 1;
		ret = 0;
		goto out;
	}

	if (ret >= 0) {
		dio->curr_user_address += ret * PAGE_SIZE;
		dio->curr_page += ret;
		dio->head = 0;
		dio->tail = ret;
		ret = 0;
	}
out:
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently.  This also provides nicer use
 * of the L1 cache.
 */
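/*
 * For example, an aligned 1 MB request with 4 KB pages covers 256 user
 * pages, so get_user_pages_fast() runs just four times (DIO_PAGES == 64
 * pages per batch); every other dio_get_page() call is an array lookup.
 */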
static struct page *dio_get_page(struct dio *dio)
{
	if (dio_pages_present(dio) == 0) {
		int ret;

		ret = dio_refill_pages(dio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(dio) == 0);
	}
	return dio->pages[dio->head++];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @dio: the dio structure for the completed operation
 * @offset: the byte offset in the file of the completed operation
 * @ret: the status accumulated so far (0, a negative errno, or -EIOCBQUEUED)
 *
 * This releases locks as dictated by the locking type, lets interested parties
 * know that a DIO operation has completed, and calculates the resulting return
 * code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static int dio_complete(struct dio *dio, loff_t offset, int ret)
{
	ssize_t transferred = 0;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
	}

	if (dio->end_io && dio->result)
		dio->end_io(dio->iocb, offset, transferred,
			    dio->map_bh.b_private);

	if (dio->flags & DIO_LOCKING)
		/* lockdep: non-owner release */
		up_read_non_owner(&dio->inode->i_alloc_sem);

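	/* Precedence: page fault errno, then device IO error, then bytes */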
	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	return ret;
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
		aio_complete(dio->iocb, ret, 0);
		kfree(dio);
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

static int
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
		sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

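	/*
	 * bio_alloc() allocates from a mempool, so with GFP_KERNEL (which
	 * includes __GFP_WAIT) and a valid nr_vecs it is not expected to
	 * return NULL; hence no error check on the result below.
	 */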
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	dio->bio = bio;
	return 0;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static void dio_bio_submit(struct dio *dio)
{
	struct bio *bio = dio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->rw == READ)
		bio_set_pages_dirty(bio);

	submit_bio(dio->rw, bio);

	dio->bio = NULL;
	dio->boundary = 0;
}

/*
 * Release any resources in case of a failure
 */
static void dio_cleanup(struct dio *dio)
{
	while (dio_pages_present(dio))
		page_cache_release(dio_get_page(dio));
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec;
	int page_no;

	if (!uptodate)
		dio->io_error = -EIO;

	if (dio->is_async && dio->rw == READ) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
			struct page *page = bvec[page_no].bv_page;

			if (dio->rw == READ && !PageCompound(page))
				set_page_dirty_lock(page);
			page_cache_release(page);
		}
		bio_put(bio);
	}
	return uptodate ? 0 : -EIO;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are recorded in dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static int dio_bio_reap(struct dio *dio)
{
	int ret = 0;

	if (dio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		dio->reap_counter = 0;
	}
	return ret;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at dio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
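/*
 * Illustration only: a minimal get_block for a hypothetical filesystem with
 * a flat 1:1 file-to-disk mapping (not a real in-tree implementation):
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh, int create)
 *	{
 *		map_bh(bh, inode->i_sb, iblock);
 *		return 0;
 *	}
 *
 * map_bh() sets buffer_mapped() and fills b_bdev, b_blocknr and b_size
 * (one fs block), so direct-io would call back here once per block.
 */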
static int get_more_blocks(struct dio *dio)
{
	int ret;
	struct buffer_head *map_bh = &dio->map_bh;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	unsigned long dio_count;/* Number of dio_block-sized blocks */
	unsigned long blkmask;
	int create;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
		fs_startblk = dio->block_in_file >> dio->blkfactor;
		dio_count = dio->final_block_in_request - dio->block_in_file;
		fs_count = dio_count >> dio->blkfactor;
		blkmask = (1 << dio->blkfactor) - 1;
		if (dio_count & blkmask)
			fs_count++;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << dio->inode->i_blkbits;

		/*
		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
		 * forbid block creations: only overwrites are permitted.
		 * We will return early to the caller once we see an
		 * unmapped buffer head returned, and the caller will fall
		 * back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_block method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->rw & WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (dio->block_in_file < (i_size_read(dio->inode) >>
							dio->blkbits))
				create = 0;
		}

		ret = (*dio->get_block)(dio->inode, fs_startblk,
						map_bh, create);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static int dio_new_bio(struct dio *dio, sector_t start_sector)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio);
	if (ret)
		goto out;
	sector = start_sector << (dio->blkbits - 9);
	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
	BUG_ON(nr_pages <= 0);
	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
	dio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static int dio_bio_add_page(struct dio *dio)
{
	int ret;

	ret = bio_add_page(dio->bio, dio->cur_page,
			dio->cur_page_len, dio->cur_page_offset);
	if (ret == dio->cur_page_len) {
		/*
		 * Decrement the count only if we are done with this page
		 */
		if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
			dio->pages_in_io--;
		page_cache_get(dio->cur_page);
		dio->final_block_in_bio = dio->cur_page_block +
			(dio->cur_page_len >> dio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static int dio_send_cur_page(struct dio *dio)
{
	int ret = 0;

	if (dio->bio) {
		/*
		 * See whether this new request is contiguous with the old
		 */
		if (dio->final_block_in_bio != dio->cur_page_block)
			dio_bio_submit(dio);
		/*
		 * Submit now if the underlying fs is about to perform a
		 * metadata read
		 */
		if (dio->boundary)
			dio_bio_submit(dio);
	}

	if (dio->bio == NULL) {
		ret = dio_new_bio(dio, dio->cur_page_block);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(dio) != 0) {
		dio_bio_submit(dio);
		ret = dio_new_bio(dio, dio->cur_page_block);
		if (ret == 0) {
			ret = dio_bio_add_page(dio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
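/*
 * E.g. two successive 512-byte chunks of the same page whose disk blocks are
 * adjacent simply grow cur_page_len from 512 to 1024; no bio segment is
 * created until the run of contiguity is broken or the page is sent.
 */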
static int
submit_page_section(struct dio *dio, struct page *page,
		unsigned offset, unsigned len, sector_t blocknr)
{
	int ret = 0;

	if (dio->rw & WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if ((dio->cur_page == page) &&
		(dio->cur_page_offset + dio->cur_page_len == offset) &&
		(dio->cur_page_block +
			(dio->cur_page_len >> dio->blkbits) == blocknr)) {
		dio->cur_page_len += len;

		/*
		 * If dio->boundary then we want to schedule the IO now to
		 * avoid metadata seeks.
		 */
		if (dio->boundary) {
			ret = dio_send_cur_page(dio);
			page_cache_release(dio->cur_page);
			dio->cur_page = NULL;
		}
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (dio->cur_page) {
		ret = dio_send_cur_page(dio);
		page_cache_release(dio->cur_page);
		dio->cur_page = NULL;
		if (ret)
			goto out;
	}

	page_cache_get(page);		/* It is in dio */
	dio->cur_page = page;
	dio->cur_page_offset = offset;
	dio->cur_page_len = len;
	dio->cur_page_block = blocknr;
out:
	return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new.
 */
static void clean_blockdev_aliases(struct dio *dio)
{
	unsigned i;
	unsigned nblocks;

	nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(dio->map_bh.b_bdev,
					dio->map_bh.b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill in the unused portion of the
 * block with zeros.  This happens only if the user buffer, file offset or
 * IO length is not a filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
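/*
 * Example: with blkfactor == 3 (512-byte dio blocks inside 4 KB fs blocks),
 * a write into a freshly allocated fs block starting 2560 bytes in (dio
 * block 5 of 8) must first zero dio blocks 0-4 of that fs block; a second
 * pass with end == 1 zeroes whatever dio blocks follow the last one written.
 */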
static void dio_zero_block(struct dio *dio, int end)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	dio->start_zero_done = 1;
	if (!dio->blkfactor || !buffer_new(&dio->map_bh))
		return;

	dio_blocks_per_fs_block = 1 << dio->blkfactor;
	this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << dio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, page, 0, this_chunk_bytes,
				dio->next_block_for_io))
		return;

	dio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from a file: there we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio)
{
	const unsigned blkbits = dio->blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct page *page;
	unsigned block_in_page;
	struct buffer_head *map_bh = &dio->map_bh;
	int ret = 0;

	/* The I/O can start at any block offset within the first page */
	block_in_page = dio->first_block_in_page;

	while (dio->block_in_file < dio->final_block_in_request) {
		page = dio_get_page(dio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		while (block_in_page < blocks_per_page) {
			unsigned offset_in_page = block_in_page << blkbits;
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (dio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				dio->blocks_available =
						map_bh->b_size >> dio->blkbits;
				dio->next_block_for_io =
					map_bh->b_blocknr << dio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio);

				if (!dio->blkfactor)
					goto do_holes;

				blkmask = (1 << dio->blkfactor) - 1;
				dio_remainder = (dio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into an fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk.
				 */
				if (!buffer_new(map_bh))
					dio->next_block_for_io += dio_remainder;
				dio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->rw & WRITE) {
					page_cache_release(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (dio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					page_cache_release(page);
					goto out;
				}
				zero_user(page, block_in_page << blkbits,
						1 << blkbits);
				dio->block_in_file++;
				block_in_page++;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(dio->blkfactor && !dio->start_zero_done))
				dio_zero_block(dio, 0);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = dio->blocks_available;
			u = (PAGE_SIZE - offset_in_page) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = dio->final_block_in_request - dio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			dio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, page, offset_in_page,
				this_chunk_bytes, dio->next_block_for_io);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			dio->next_block_for_io += this_chunk_blocks;

			dio->block_in_file += this_chunk_blocks;
			block_in_page += this_chunk_blocks;
			dio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(dio->block_in_file > dio->final_block_in_request);
			if (dio->block_in_file == dio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
		block_in_page = 0;
	}
out:
	return ret;
}

/*
 * Releases both i_mutex and i_alloc_sem
 */
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
	const struct iovec *iov, loff_t offset, unsigned long nr_segs,
	unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
	struct dio *dio)
{
	unsigned long user_addr;
	unsigned long flags;
	int seg;
	ssize_t ret = 0;
	ssize_t ret2;
	size_t bytes;

	dio->inode = inode;
	dio->rw = rw;
	dio->blkbits = blkbits;
	dio->blkfactor = inode->i_blkbits - blkbits;
	dio->block_in_file = offset >> blkbits;

	dio->get_block = get_block;
	dio->end_io = end_io;
	dio->final_block_in_bio = -1;
	dio->next_block_for_io = -1;

	dio->iocb = iocb;
	dio->i_size = i_size_read(inode);

	spin_lock_init(&dio->bio_lock);
	dio->refcount = 1;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(dio->blkfactor))
		dio->pages_in_io = 2;

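	/*
	 * Estimate the user page count, including partial first and last
	 * pages.  E.g. a 0x2000-byte segment at user address 0x1800 with
	 * 4 KB pages touches pages 1, 2 and 3; the expression below gives
	 * (0x1800 + 0x2000 + 0xfff)/0x1000 - 0x1800/0x1000 == 4 - 1 == 3.
	 */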
	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		dio->pages_in_io +=
			((user_addr + iov[seg].iov_len + PAGE_SIZE-1)/PAGE_SIZE
				- user_addr/PAGE_SIZE);
	}

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		dio->size += bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
		dio->final_block_in_request = dio->block_in_file +
						(bytes >> blkbits);
		/* Page fetching state */
		dio->head = 0;
		dio->tail = 0;
		dio->curr_page = 0;

		dio->total_pages = 0;
		if (user_addr & (PAGE_SIZE-1)) {
			dio->total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		dio->curr_user_address = user_addr;

		ret = do_direct_IO(dio);

		dio->result += iov[seg].iov_len -
			((dio->final_block_in_request - dio->block_in_file) <<
					blkbits);

		if (ret) {
			dio_cleanup(dio);
			break;
		}
	} /* end iovec loop */

	if (ret == -ENOTBLK && (rw & WRITE)) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return
		 */
		ret = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, 1);

	if (dio->cur_page) {
		ret2 = dio_send_cur_page(dio);
		if (ret == 0)
			ret = ret2;
		page_cache_release(dio->cur_page);
		dio->cur_page = NULL;
	}
	if (dio->bio)
		dio_bio_submit(dio);

	/*
	 * It is possible that we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold of.
	 */
	dio_cleanup(dio);

	/*
	 * All block lookups have been performed. For READ requests
	 * we can let i_mutex go now that it has achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (rw == READ && (dio->flags & DIO_LOCKING))
		mutex_unlock(&dio->inode->i_mutex);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or full aio write has been set up.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(ret == -EIOCBQUEUED);
	if (dio->is_async && ret == 0 && dio->result &&
	    ((rw & READ) || (dio->result == dio->size)))
		ret = -EIOCBQUEUED;

	if (ret != -EIOCBQUEUED) {
		/* All IO is now issued, send it on its way */
		blk_run_address_space(inode->i_mapping);
		dio_await_completion(dio);
	}

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can do so too, if it was a broken operation as
	 * described above, or in fact if all the bios race to complete before
	 * we get here.  In that case dio_complete() translates the
	 * -EIOCBQUEUED into the proper return code that the caller will hand
	 * to aio_complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (ret2 == 0) {
		ret = dio_complete(dio, offset, ret);
		kfree(dio);
	} else
		BUG_ON(ret != -EIOCBQUEUED);

	return ret;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held; for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *    For reads and writes i_alloc_sem is taken in shared mode and released
 *    on I/O completion (which may happen asynchronously after returning to
 *    the caller).
 *
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *    For reads and writes both i_mutex and i_alloc_sem are not held on
 *    entry and are never taken.
 */
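/*
 * A typical "dumb" filesystem wires this up from its ->direct_IO
 * address_space op through the blockdev_direct_IO() wrapper.  Sketch
 * (ext2-style names, shown for illustration only):
 *
 *	static ssize_t ext2_direct_IO(int rw, struct kiocb *iocb,
 *			const struct iovec *iov, loff_t offset,
 *			unsigned long nr_segs)
 *	{
 *		struct inode *inode = iocb->ki_filp->f_mapping->host;
 *
 *		return blockdev_direct_IO(rw, iocb, inode,
 *				inode->i_sb->s_bdev, iov, offset, nr_segs,
 *				ext2_get_block, NULL);
 *	}
 */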
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset,
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	int flags)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blkbits = inode->i_blkbits;
	unsigned bdev_blkbits = 0;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;

	if (rw & WRITE)
		rw = WRITE_ODIRECT_PLUG;

	if (bdev)
		bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));

	if (offset & blocksize_mask) {
		if (bdev)
			blkbits = bdev_blkbits;
		blocksize_mask = (1 << blkbits) - 1;
		if (offset & blocksize_mask)
			goto out;
	}

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask)) {
			if (bdev)
				blkbits = bdev_blkbits;
			blocksize_mask = (1 << blkbits) - 1;
			if ((addr & blocksize_mask) || (size & blocksize_mask))
				goto out;
		}
	}
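
	/*
	 * E.g. an O_DIRECT request at offset 512 on a 4 KB-blocksize
	 * filesystem backed by a 512-byte-sector blockdev passes the checks
	 * above with blkbits lowered to 9; direct_io_worker() then runs
	 * with blkfactor == 12 - 9 == 3.
	 */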

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		/* watch out for a 0 len io from a tricksy fs */
		if (rw == READ && end > offset) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* will be released by direct_io_worker */
			mutex_lock(&inode->i_mutex);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				mutex_unlock(&inode->i_mutex);
				kfree(dio);
				goto out;
			}
		}

		/*
		 * Will be released at I/O completion, possibly in a
		 * different thread.
		 */
		down_read_non_owner(&inode->i_alloc_sem);
	}

	/*
	 * For file extending writes updating i_size before data
	 * writeouts complete can expose uninitialized blocks. So
	 * even for AIO, we need to wait for i/o to complete before
	 * returning in this case.
	 */
	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
		(end > i_size_read(inode)));

	retval = direct_io_worker(rw, iocb, inode, iov, offset,
				nr_segs, blkbits, get_block, end_io, dio);

	/*
	 * In case of an error, an extending write may have instantiated a
	 * few blocks outside i_size.  Trim these off again for DIO_LOCKING.
	 *
	 * NOTE: filesystems with their own locking have to handle this
	 * on their own.
	 */
	if (flags & DIO_LOCKING) {
		if (unlikely((rw & WRITE) && retval < 0)) {
			loff_t isize = i_size_read(inode);
			if (end > isize)
				vmtruncate(inode, isize);
		}
	}

out:
	return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);