/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	akpm@zip.com.au
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 *		added readv/writev support.
 * 29Oct2002	akpm@zip.com.au
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <asm/atomic.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure on the stack.
 */
#define DIO_PAGES	64
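/*
 * (Illustrative arithmetic: DIO_PAGES sizes the pages[] array in struct
 * dio below, i.e. 64 page pointers -- 256 bytes with 32-bit pointers,
 * 512 bytes with 64-bit pointers.)
 */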

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 *
 * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
 * This determines whether we need to do the fancy locking which prevents
 * direct-IO from being able to read uninitialised disk blocks.  If it is zero
 * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
 * not held for the entire direct write (it is taken briefly, initially, during
 * a direct read, but it is never held for the duration of a direct-IO).
 */
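/*
 * A worked example with illustrative numbers: a 512-byte-aligned request
 * against a filesystem with 4096-byte blocks gives blkbits = 9,
 * i_blkbits = 12 and hence blkfactor = 3.  dio_block 24 is then
 * fs_block 24 >> 3 == 3, and fs_block 3 starts at dio_block 3 << 3 == 24.
 */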

struct dio {
	/* BIO submission state */
	struct bio *bio;		/* bio under assembly */
	struct inode *inode;
	int rw;
	loff_t i_size;			/* i_size when submitted */
	int lock_type;			/* doesn't change */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	size_t	size;			/* total request size (doesn't change)*/
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change.  Used only once */
	int boundary;			/* prev block is at a boundary */
	int reap_counter;		/* rate limit reaping */
	get_blocks_t *get_blocks;	/* block mapping function */
	dio_iodone_t *end_io;		/* IO completion function */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */
	struct buffer_head map_bh;	/* last get_blocks() result */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */

	/*
	 * Page fetching state.  These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	int page_errors;		/* errno from get_user_pages() */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int bio_count;			/* nr bios to be completed */
	int bios_in_flight;		/* nr bios in flight */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	int is_async;			/* is IO async ? */
	ssize_t result;			/* IO result */
};

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio *dio)
{
	return dio->tail - dio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static int dio_refill_pages(struct dio *dio)
{
	int ret;
	int nr_pages;

	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(
		current,			/* Task for fault accounting */
		current->mm,			/* whose pages? */
		dio->curr_user_address,		/* Where from? */
		nr_pages,			/* How many pages? */
		dio->rw == READ,		/* Write to memory? */
		0,				/* force (?) */
		&dio->pages[0],
		NULL);				/* vmas */
	up_read(&current->mm->mmap_sem);

	if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) {
		struct page *page = ZERO_PAGE(dio->curr_user_address);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		page_cache_get(page);
		dio->pages[0] = page;
		dio->head = 0;
		dio->tail = 1;
		ret = 0;
		goto out;
	}

	if (ret >= 0) {
		dio->curr_user_address += ret * PAGE_SIZE;
		dio->curr_page += ret;
		dio->head = 0;
		dio->tail = ret;
		ret = 0;
	}
out:
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently.  This provides nicer use of the
 * L1 cache.
 */
static struct page *dio_get_page(struct dio *dio)
{
	if (dio_pages_present(dio) == 0) {
		int ret;

		ret = dio_refill_pages(dio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(dio) == 0);
	}
	return dio->pages[dio->head++];
}
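/*
 * Note that the page returned by dio_get_page() still carries the reference
 * taken in get_user_pages() (or by page_cache_get() in the ZERO_PAGE
 * fallback above); it is the caller who eventually drops it via
 * page_cache_release().
 */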

/*
 * Called when all DIO BIO I/O has been completed - let the filesystem
 * know, if it registered an interest earlier via get_blocks.  Pass the
 * private field of the map buffer_head so that filesystems can use it
 * to hold additional state between get_blocks calls and dio_complete.
 */
static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
{
	if (dio->end_io && dio->result)
		dio->end_io(dio->iocb, offset, bytes, dio->map_bh.b_private);
	if (dio->lock_type == DIO_LOCKING)
		up_read(&dio->inode->i_alloc_sem);
}

/*
 * Called when a BIO has been processed.  If the count goes to zero then IO is
 * complete and we can signal this to the AIO layer.
 */
static void finished_one_bio(struct dio *dio)
{
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	if (dio->bio_count == 1) {
		if (dio->is_async) {
			ssize_t transferred;
			loff_t offset;

			/*
			 * Last reference to the dio is going away.
			 * Drop spinlock and complete the DIO.
			 */
			spin_unlock_irqrestore(&dio->bio_lock, flags);

			/* Check for short read case */
			transferred = dio->result;
			offset = dio->iocb->ki_pos;

			if ((dio->rw == READ) &&
			    ((offset + transferred) > dio->i_size))
				transferred = dio->i_size - offset;

			dio_complete(dio, offset, transferred);

			/* Complete AIO later if falling back to buffered i/o */
			if (dio->result == dio->size ||
				((dio->rw == READ) && dio->result)) {
				aio_complete(dio->iocb, transferred, 0);
				kfree(dio);
				return;
			} else {
				/*
				 * Falling back to buffered
				 */
				spin_lock_irqsave(&dio->bio_lock, flags);
				dio->bio_count--;
				if (dio->waiter)
					wake_up_process(dio->waiter);
				spin_unlock_irqrestore(&dio->bio_lock, flags);
				return;
			}
		}
	}
	dio->bio_count--;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
 * Asynchronous IO callback.
 */
static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
{
	struct dio *dio = bio->bi_private;

	if (bio->bi_size)
		return 1;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);
	return 0;
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	if (bio->bi_size)
		return 1;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	dio->bios_in_flight--;
	if (dio->waiter && dio->bios_in_flight == 0)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return 0;
}

static int
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
		sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, nr_vecs);
	if (bio == NULL)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	dio->bio = bio;
	return 0;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 */
static void dio_bio_submit(struct dio *dio)
{
	struct bio *bio = dio->bio;
	unsigned long flags;

	bio->bi_private = dio;
	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->bio_count++;
	dio->bios_in_flight++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	if (dio->is_async && dio->rw == READ)
		bio_set_pages_dirty(bio);
	submit_bio(dio->rw, bio);

	dio->bio = NULL;
	dio->boundary = 0;
}

/*
 * Release any resources in case of a failure
 */
static void dio_cleanup(struct dio *dio)
{
	while (dio_pages_present(dio))
		page_cache_release(dio_get_page(dio));
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	while (dio->bio_list == NULL) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (dio->bio_list == NULL) {
			dio->waiter = current;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			blk_run_address_space(dio->inode->i_mapping);
			io_schedule();
			spin_lock_irqsave(&dio->bio_lock, flags);
			dio->waiter = NULL;
		}
		set_current_state(TASK_RUNNING);
	}
	bio = dio->bio_list;
	dio->bio_list = bio->bi_private;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec;
	int page_no;

	if (!uptodate)
		dio->result = -EIO;

	if (dio->is_async && dio->rw == READ) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
			struct page *page = bvec[page_no].bv_page;

			if (dio->rw == READ && !PageCompound(page))
				set_page_dirty_lock(page);
			page_cache_release(page);
		}
		bio_put(bio);
	}
	finished_one_bio(dio);
	return uptodate ? 0 : -EIO;
}

/*
 * Wait on and process all in-flight BIOs.
 */
static int dio_await_completion(struct dio *dio)
{
	int ret = 0;

	if (dio->bio)
		dio_bio_submit(dio);

	/*
	 * The bio_lock is not held for the read of bio_count.
	 * This is ok since it is dio_bio_complete() that changes
	 * bio_count.
	 */
	while (dio->bio_count) {
		struct bio *bio = dio_await_one(dio);
		int ret2;

		ret2 = dio_bio_complete(dio, bio);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static int dio_bio_reap(struct dio *dio)
{
	int ret = 0;

	if (dio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		dio->reap_counter = 0;
	}
	return ret;
}
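/*
 * (dio_bio_reap() runs once per dio_new_bio() call, so the threshold of 64
 * means completed BIOs are swept roughly once per 64 BIOs created: often
 * enough to bound memory, rarely enough to stay off the fast path.)
 */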

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at dio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_blocks() as it walks the hole.
 */
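/*
 * Worked example of the fs_count rounding below (illustrative numbers):
 * with blkfactor = 3 (512-byte dio_blocks on a 4096-byte fs), a request
 * with dio_count = 9 gives fs_count = 9 >> 3 = 1, and since 9 & 7 is
 * non-zero the partial trailing fs block bumps fs_count to 2.
 */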
static int get_more_blocks(struct dio *dio)
{
	int ret;
	struct buffer_head *map_bh = &dio->map_bh;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	unsigned long dio_count;/* Number of dio_block-sized blocks */
	unsigned long blkmask;
	int create;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;
		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
		fs_startblk = dio->block_in_file >> dio->blkfactor;
		dio_count = dio->final_block_in_request - dio->block_in_file;
		fs_count = dio_count >> dio->blkfactor;
		blkmask = (1 << dio->blkfactor) - 1;
		if (dio_count & blkmask)
			fs_count++;

		create = dio->rw == WRITE;
		if (dio->lock_type == DIO_LOCKING) {
			if (dio->block_in_file < (i_size_read(dio->inode) >>
							dio->blkbits))
				create = 0;
		} else if (dio->lock_type == DIO_NO_LOCKING) {
			create = 0;
		}
		/*
		 * For writes inside i_size we forbid block creations: only
		 * overwrites are permitted.  We fall back to buffered writes
		 * at a higher level for inside-i_size block-instantiating
		 * writes.
		 */
		ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
						map_bh, create);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static int dio_new_bio(struct dio *dio, sector_t start_sector)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio);
	if (ret)
		goto out;
	sector = start_sector << (dio->blkbits - 9);
	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
	BUG_ON(nr_pages <= 0);
	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
	dio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static int dio_bio_add_page(struct dio *dio)
{
	int ret;

	ret = bio_add_page(dio->bio, dio->cur_page,
			dio->cur_page_len, dio->cur_page_offset);
	if (ret == dio->cur_page_len) {
		/*
		 * Decrement count only if we are done with this page
		 */
		if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
			dio->pages_in_io--;
		page_cache_get(dio->cur_page);
		dio->final_block_in_bio = dio->cur_page_block +
			(dio->cur_page_len >> dio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static int dio_send_cur_page(struct dio *dio)
{
	int ret = 0;

	if (dio->bio) {
		/*
		 * See whether this new request is contiguous with the old
		 */
		if (dio->final_block_in_bio != dio->cur_page_block)
			dio_bio_submit(dio);
		/*
		 * Submit now if the underlying fs is about to perform a
		 * metadata read
		 */
		if (dio->boundary)
			dio_bio_submit(dio);
	}

	if (dio->bio == NULL) {
		ret = dio_new_bio(dio, dio->cur_page_block);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(dio) != 0) {
		dio_bio_submit(dio);
		ret = dio_new_bio(dio, dio->cur_page_block);
		if (ret == 0) {
			ret = dio_bio_add_page(dio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
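/*
 * Illustrative coalescing example, with 512-byte dio blocks (blkbits = 9):
 * two back-to-back calls for the same page, (offset 0, len 512, blocknr 8)
 * then (offset 512, len 512, blocknr 9), satisfy the three tests below and
 * simply grow cur_page_len to 1024; no BIO traffic happens until the
 * contiguous run is broken.
 */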
static int
submit_page_section(struct dio *dio, struct page *page,
		unsigned offset, unsigned len, sector_t blocknr)
{
	int ret = 0;

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (	(dio->cur_page == page) &&
		(dio->cur_page_offset + dio->cur_page_len == offset) &&
		(dio->cur_page_block +
			(dio->cur_page_len >> dio->blkbits) == blocknr)) {
		dio->cur_page_len += len;

		/*
		 * If dio->boundary then we want to schedule the IO now to
		 * avoid metadata seeks.
		 */
		if (dio->boundary) {
			ret = dio_send_cur_page(dio);
			page_cache_release(dio->cur_page);
			dio->cur_page = NULL;
		}
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (dio->cur_page) {
		ret = dio_send_cur_page(dio);
		page_cache_release(dio->cur_page);
		dio->cur_page = NULL;
		if (ret)
			goto out;
	}

	page_cache_get(page);		/* It is in dio */
	dio->cur_page = page;
	dio->cur_page_offset = offset;
	dio->cur_page_len = len;
	dio->cur_page_block = blocknr;
out:
	return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new().
 */
static void clean_blockdev_aliases(struct dio *dio)
{
	unsigned i;
	unsigned nblocks;

	nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(dio->map_bh.b_bdev,
					dio->map_bh.b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill in the unused portion of the
 * block with zeros.  This happens only if the user buffer, file offset or
 * IO length is not a multiple of the filesystem block size.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
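/*
 * Illustrative example with 512-byte dio blocks in a 4096-byte fs block
 * (blkfactor = 3, so 8 dio blocks per fs block): a write into a newly
 * allocated block starting 1024 bytes in has this_chunk_blocks = 2, so
 * the first two dio blocks are zeroed; an IO ending 1024 bytes into an
 * fs block zeroes the remaining 8 - 2 = 6 dio blocks at the end.
 */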
static void dio_zero_block(struct dio *dio, int end)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	dio->start_zero_done = 1;
	if (!dio->blkfactor || !buffer_new(&dio->map_bh))
		return;

	dio_blocks_per_fs_block = 1 << dio->blkfactor;
	this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << dio->blkbits;

	page = ZERO_PAGE(dio->curr_user_address);
	if (submit_page_section(dio, page, 0, this_chunk_bytes,
				dio->next_block_for_io))
		return;

	dio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from a file, because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_blocks function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_blocks().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
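/*
 * Concretely (illustrative numbers): with PAGE_SIZE = 4096 and a blockdev
 * using blkbits = 9, blocks_per_page below is 8, so the inner loop walks
 * up to eight 512-byte dio blocks within each user page, submitting them
 * in contiguous chunks.
 */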
static int do_direct_IO(struct dio *dio)
{
	const unsigned blkbits = dio->blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct page *page;
	unsigned block_in_page;
	struct buffer_head *map_bh = &dio->map_bh;
	int ret = 0;

	/* The I/O can start at any block offset within the first page */
	block_in_page = dio->first_block_in_page;

	while (dio->block_in_file < dio->final_block_in_request) {
		page = dio_get_page(dio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		while (block_in_page < blocks_per_page) {
			unsigned offset_in_page = block_in_page << blkbits;
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (dio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				dio->blocks_available =
						map_bh->b_size >> dio->blkbits;
				dio->next_block_for_io =
					map_bh->b_blocknr << dio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio);

				if (!dio->blkfactor)
					goto do_holes;

				blkmask = (1 << dio->blkfactor) - 1;
				dio_remainder = (dio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					dio->next_block_for_io += dio_remainder;
				dio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				char *kaddr;
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->rw == WRITE) {
					page_cache_release(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (dio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					page_cache_release(page);
					goto out;
				}
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + (block_in_page << blkbits),
						0, 1 << blkbits);
				flush_dcache_page(page);
				kunmap_atomic(kaddr, KM_USER0);
				dio->block_in_file++;
				block_in_page++;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(dio->blkfactor && !dio->start_zero_done))
				dio_zero_block(dio, 0);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = dio->blocks_available;
			u = (PAGE_SIZE - offset_in_page) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = dio->final_block_in_request - dio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			dio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, page, offset_in_page,
				this_chunk_bytes, dio->next_block_for_io);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			dio->next_block_for_io += this_chunk_blocks;

			dio->block_in_file += this_chunk_blocks;
			block_in_page += this_chunk_blocks;
			dio->blocks_available -= this_chunk_blocks;
next_block:
			if (dio->block_in_file > dio->final_block_in_request)
				BUG();
			if (dio->block_in_file == dio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
		block_in_page = 0;
	}
out:
	return ret;
}

/*
 * Releases both i_mutex and i_alloc_sem
 */
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
	const struct iovec *iov, loff_t offset, unsigned long nr_segs,
	unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io,
	struct dio *dio)
{
	unsigned long user_addr;
	int seg;
	ssize_t ret = 0;
	ssize_t ret2;
	size_t bytes;

	dio->bio = NULL;
	dio->inode = inode;
	dio->rw = rw;
	dio->blkbits = blkbits;
	dio->blkfactor = inode->i_blkbits - blkbits;
	dio->start_zero_done = 0;
	dio->size = 0;
	dio->block_in_file = offset >> blkbits;
	dio->blocks_available = 0;
	dio->cur_page = NULL;

	dio->boundary = 0;
	dio->reap_counter = 0;
	dio->get_blocks = get_blocks;
	dio->end_io = end_io;
	dio->map_bh.b_private = NULL;
	dio->final_block_in_bio = -1;
	dio->next_block_for_io = -1;

	dio->page_errors = 0;
	dio->result = 0;
	dio->iocb = iocb;
	dio->i_size = i_size_read(inode);

	/*
	 * BIO completion state.
	 *
	 * ->bio_count starts out at one, and we decrement it to zero after all
	 * BIOs are submitted.  This is to avoid the situation where a really
	 * fast (or synchronous) device could take the count to zero while
	 * we're still submitting BIOs.
	 */
	dio->bio_count = 1;
	dio->bios_in_flight = 0;
	spin_lock_init(&dio->bio_lock);
	dio->bio_list = NULL;
	dio->waiter = NULL;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(dio->blkfactor))
		dio->pages_in_io = 2;
	else
		dio->pages_in_io = 0;

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		dio->pages_in_io +=
			((user_addr + iov[seg].iov_len + PAGE_SIZE - 1) /
				PAGE_SIZE - user_addr / PAGE_SIZE);
	}

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		dio->size += bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
		dio->final_block_in_request = dio->block_in_file +
						(bytes >> blkbits);
		/* Page fetching state */
		dio->head = 0;
		dio->tail = 0;
		dio->curr_page = 0;

		dio->total_pages = 0;
		if (user_addr & (PAGE_SIZE-1)) {
			dio->total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		dio->curr_user_address = user_addr;

		ret = do_direct_IO(dio);

		dio->result += iov[seg].iov_len -
			((dio->final_block_in_request - dio->block_in_file) <<
					blkbits);

		if (ret) {
			dio_cleanup(dio);
			break;
		}
	} /* end iovec loop */

	if (ret == -ENOTBLK && rw == WRITE) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return
		 */
		ret = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, 1);

	if (dio->cur_page) {
		ret2 = dio_send_cur_page(dio);
		if (ret == 0)
			ret = ret2;
		page_cache_release(dio->cur_page);
		dio->cur_page = NULL;
	}
	if (dio->bio)
		dio_bio_submit(dio);

	/*
	 * It is possible that we return a short IO due to end of file.
	 * In that case, we need to release all the pages we got hold of.
	 */
	dio_cleanup(dio);

	/*
	 * All block lookups have been performed.  For READ requests
	 * we can let i_mutex go now that it has achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
		mutex_unlock(&dio->inode->i_mutex);

	/*
	 * OK, all BIOs are submitted, so we can decrement bio_count to truly
	 * reflect the number of to-be-processed BIOs.
	 */
	if (dio->is_async) {
		int should_wait = 0;

		if (dio->result < dio->size && rw == WRITE) {
			dio->waiter = current;
			should_wait = 1;
		}
		if (ret == 0)
			ret = dio->result;
		finished_one_bio(dio);		/* This can free the dio */
		blk_run_address_space(inode->i_mapping);
		if (should_wait) {
			unsigned long flags;
			/*
			 * Wait for already issued I/O to drain out and
			 * release its references to user-space pages
			 * before returning to fall back on buffered I/O
			 */

			spin_lock_irqsave(&dio->bio_lock, flags);
			set_current_state(TASK_UNINTERRUPTIBLE);
			while (dio->bio_count) {
				spin_unlock_irqrestore(&dio->bio_lock, flags);
				io_schedule();
				spin_lock_irqsave(&dio->bio_lock, flags);
				set_current_state(TASK_UNINTERRUPTIBLE);
			}
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			set_current_state(TASK_RUNNING);
			kfree(dio);
		}
	} else {
		ssize_t transferred = 0;

		finished_one_bio(dio);
		ret2 = dio_await_completion(dio);
		if (ret == 0)
			ret = ret2;
		if (ret == 0)
			ret = dio->page_errors;
		if (dio->result) {
			loff_t i_size = i_size_read(inode);

			transferred = dio->result;
			/*
			 * Adjust the return value if the read crossed a
			 * non-block-aligned EOF.
			 */
			if (rw == READ && (offset + transferred > i_size))
				transferred = i_size - offset;
		}
		dio_complete(dio, offset, transferred);
		if (ret == 0)
			ret = transferred;

		/* We could have also come here on an AIO file extend */
		if (!is_sync_kiocb(iocb) && rw == WRITE &&
		    ret >= 0 && dio->result == dio->size)
			/*
			 * For AIO writes where we have completed the
			 * i/o, we have to mark the aio complete.
			 */
			aio_complete(iocb, ret, 0);
		kfree(dio);
	}
	return ret;
}

/*
 * This is a library function for use by filesystem drivers.
 * The locking rules are governed by the dio_lock_type parameter.
 *
 * DIO_NO_LOCKING (no locking, for raw block device access)
 * For writes, i_mutex is not held on entry; it is never taken.
 *
 * DIO_LOCKING (simple locking for regular files)
 * For writes we are called under i_mutex and return with i_mutex held, even
 * though it is internally dropped.
 * For reads, i_mutex is not held on entry, but it is taken and dropped before
 * returning.
 *
 * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
 *	uninitialised data, allowing parallel direct readers and writers)
 * For writes we are called without i_mutex, return without it, never touch it.
 * For reads we are called under i_mutex and return with i_mutex held, even
 * though it may be internally dropped.
 *
 * Additional i_alloc_sem locking requirements described inline below.
 */
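/*
 * Typical use from a filesystem's ->direct_IO method (an illustrative
 * sketch only -- the "foo" names are hypothetical; blockdev_direct_IO()
 * is the DIO_LOCKING convenience wrapper around this function):
 *
 *	static ssize_t foo_direct_IO(int rw, struct kiocb *iocb,
 *			const struct iovec *iov, loff_t offset,
 *			unsigned long nr_segs)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return blockdev_direct_IO(rw, iocb, inode,
 *				inode->i_sb->s_bdev, iov, offset, nr_segs,
 *				foo_get_blocks, NULL);
 *	}
 */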
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset,
	unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
	int dio_lock_type)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blkbits = inode->i_blkbits;
	unsigned bdev_blkbits = 0;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;
	int release_i_mutex = 0;
	int acquire_i_mutex = 0;

	if (rw & WRITE)
		current->flags |= PF_SYNCWRITE;

	if (bdev)
		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));

	if (offset & blocksize_mask) {
		if (bdev)
			blkbits = bdev_blkbits;
		blocksize_mask = (1 << blkbits) - 1;
		if (offset & blocksize_mask)
			goto out;
	}

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask)) {
			if (bdev)
				blkbits = bdev_blkbits;
			blocksize_mask = (1 << blkbits) - 1;
			if ((addr & blocksize_mask) || (size & blocksize_mask))
				goto out;
		}
	}

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;

	/*
	 * For block device access DIO_NO_LOCKING is used,
	 *	neither readers nor writers do any locking at all
	 * For regular files using DIO_LOCKING,
	 *	readers need to grab i_mutex and i_alloc_sem
	 *	writers need to grab i_alloc_sem only (i_mutex is already held)
	 * For regular files using DIO_OWN_LOCKING,
	 *	neither readers nor writers take any locks here
	 */
	dio->lock_type = dio_lock_type;
	if (dio_lock_type != DIO_NO_LOCKING) {
		/* watch out for a 0 len io from a tricksy fs */
		if (rw == READ && end > offset) {
			struct address_space *mapping;

			mapping = iocb->ki_filp->f_mapping;
			if (dio_lock_type != DIO_OWN_LOCKING) {
				mutex_lock(&inode->i_mutex);
				release_i_mutex = 1;
			}

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				kfree(dio);
				goto out;
			}

			if (dio_lock_type == DIO_OWN_LOCKING) {
				mutex_unlock(&inode->i_mutex);
				acquire_i_mutex = 1;
			}
		}

		if (dio_lock_type == DIO_LOCKING)
			down_read(&inode->i_alloc_sem);
	}

	/*
	 * For file extending writes updating i_size before data
	 * writeouts complete can expose uninitialized blocks.  So
	 * even for AIO, we need to wait for i/o to complete before
	 * returning in this case.
	 */
	dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) &&
		(end > i_size_read(inode)));

	retval = direct_io_worker(rw, iocb, inode, iov, offset,
				nr_segs, blkbits, get_blocks, end_io, dio);

	if (rw == READ && dio_lock_type == DIO_LOCKING)
		release_i_mutex = 0;

out:
	if (release_i_mutex)
		mutex_unlock(&inode->i_mutex);
	else if (acquire_i_mutex)
		mutex_lock(&inode->i_mutex);
	if (rw & WRITE)
		current->flags &= ~PF_SYNCWRITE;
	return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);