/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
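
/*
 * Illustrative sketch, not part of the original file: a caller must cope
 * with the %NULL return before dereferencing the bdi.  The "bdev" below
 * stands for an already-opened block device.
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *	unsigned long ra_pages = bdi ? bdi->ra_pages : 0;
 */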

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
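
/*
 * Illustrative sketch, not part of the original file: a driver's request_fn
 * runs with the queue lock held, so it may call blk_delay_queue() directly
 * to back off (here for ~100ms) when the hardware is temporarily out of
 * resources.  The mydev_* helpers are hypothetical.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!mydev_hw_has_room(q->queuedata)) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 100);
 *				return;
 *			}
 *			mydev_dispatch(rq);
 *		}
 *	}
 */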

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
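
/*
 * Illustrative sketch, not part of the original file, of the stop/start
 * pairing described above: request_fn stops the queue while the hardware
 * is full, and the completion interrupt restarts it.  Both calls are made
 * under the queue lock; the mydev_* names are hypothetical.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		if (mydev_hw_queue_full(q->queuedata)) {
 *			blk_stop_queue(q);
 *			return;
 *		}
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			mydev_dispatch(rq);
 *	}
 *
 *	static irqreturn_t mydev_irq(int irq, void *data)
 *	{
 *		struct request_queue *q = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_start_queue(q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 *		return IRQ_HANDLED;
 *	}
 */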

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);
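
/*
 * Illustrative sketch, not part of the original file: before freeing state
 * that blk_delay_work() could still touch, a driver can stop the queue and
 * then wait out any pending callbacks.  The dev->dma_buffer field is
 * hypothetical.
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_stop_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 *	blk_sync_queue(q);
 *	kfree(dev->dma_buffer);
 */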

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
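
/*
 * Illustrative sketch, not part of the original file: a completion path that
 * already holds the queue lock can use blk_run_queue_async() to let kblockd
 * re-invoke request_fn instead of recursing into it, while a lock-free
 * context would simply call blk_run_queue().  mydev_complete_locked() is a
 * hypothetical helper called with the queue lock held.
 *
 *	static void mydev_complete_locked(struct request_queue *q,
 *					  struct request *rq, int error)
 *	{
 *		__blk_end_request_all(rq, error);
 *		blk_run_queue_async(q);
 *	}
 */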

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick the queue iff the dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				drain |= !list_empty(&q->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With the queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and that blk_queue_bypass() is
 * %true inside the queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
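
/*
 * Illustrative sketch, not part of the original file: the bypass calls
 * bracket updates that must not race with elevator-private request
 * allocation, as the elevator switch and blkcg policy code do.  The
 * mydev_update_policy() helper is hypothetical.
 *
 *	blk_queue_bypass_start(q);
 *	mydev_update_policy(q);
 *	blk_queue_bypass_end(q);
 *
 * Since bypass_depth counts nested callers, such brackets may nest; only
 * the outermost blk_queue_bypass_start() pays the drain and RCU cost.
 */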

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new requests or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent q->request_fn() from being invoked after draining has
	 * finished.
	 */
	spin_lock_irq(lock);
	__blk_drain_queue(q, true);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  gfp_mask, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	if (blkcg_init_queue(q))
		goto fail_id;

	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sort requests and coalesce adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests from the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock is also taken from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload);
 *    see the sketch below.
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
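
/*
 * Illustrative sketch, not part of the original file, of the pairing
 * required by the Note above: initialize the queue at probe time, tear it
 * down with blk_cleanup_queue() on removal.  The mydev_* names and the
 * 255-sector limit are hypothetical.
 *
 *	static DEFINE_SPINLOCK(mydev_lock);
 *
 *	static int mydev_probe(struct mydev *dev)
 *	{
 *		dev->queue = blk_init_queue(mydev_request_fn, &mydev_lock);
 *		if (!dev->queue)
 *			return -ENOMEM;
 *		blk_queue_max_hw_sectors(dev->queue, 255);
 *		dev->queue->queuedata = dev;
 *		return 0;
 *	}
 *
 *	static void mydev_remove(struct mydev *dev)
 *	{
 *		blk_cleanup_queue(dev->queue);
 *	}
 */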
 | 706 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 707 | struct request_queue * | 
| Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 708 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | 
 | 709 | { | 
| Mike Snitzer | c86d1b8 | 2010-06-03 11:34:52 -0600 | [diff] [blame] | 710 | 	struct request_queue *uninit_q, *q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 711 |  | 
| Mike Snitzer | c86d1b8 | 2010-06-03 11:34:52 -0600 | [diff] [blame] | 712 | 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); | 
 | 713 | 	if (!uninit_q) | 
 | 714 | 		return NULL; | 
 | 715 |  | 
| Mike Snitzer | 5151412 | 2011-11-23 10:59:13 +0100 | [diff] [blame] | 716 | 	q = blk_init_allocated_queue(uninit_q, rfn, lock); | 
| Mike Snitzer | c86d1b8 | 2010-06-03 11:34:52 -0600 | [diff] [blame] | 717 | 	if (!q) | 
 | 718 | 		blk_cleanup_queue(uninit_q); | 
 | 719 |  | 
 | 720 | 	return q; | 
| Mike Snitzer | 01effb0 | 2010-05-11 08:57:42 +0200 | [diff] [blame] | 721 | } | 
 | 722 | EXPORT_SYMBOL(blk_init_queue_node); | 
 | 723 |  | 
 | 724 | struct request_queue * | 
 | 725 | blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, | 
 | 726 | 			 spinlock_t *lock) | 
 | 727 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 728 | 	if (!q) | 
 | 729 | 		return NULL; | 
 | 730 |  | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 731 | 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) | 
| Al Viro | 8669aaf | 2006-03-18 13:50:00 -0500 | [diff] [blame] | 732 | 		return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 733 |  | 
 | 734 | 	q->request_fn		= rfn; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 735 | 	q->prep_rq_fn		= NULL; | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 736 | 	q->unprep_rq_fn		= NULL; | 
| Tejun Heo | 60ea822 | 2012-09-20 14:09:30 -0700 | [diff] [blame] | 737 | 	q->queue_flags		|= QUEUE_FLAG_DEFAULT; | 
| Vivek Goyal | c94a96a | 2011-03-02 19:04:42 -0500 | [diff] [blame] | 738 |  | 
 | 739 | 	/* Override internal queue lock with supplied lock pointer */ | 
 | 740 | 	if (lock) | 
 | 741 | 		q->queue_lock		= lock; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 742 |  | 
| Jens Axboe | f3b144a | 2009-03-06 08:48:33 +0100 | [diff] [blame] | 743 | 	/* | 
 | 744 | 	 * This also sets hw/phys segments, boundary and size | 
 | 745 | 	 */ | 
| Jens Axboe | c20e8de | 2011-09-12 12:03:37 +0200 | [diff] [blame] | 746 | 	blk_queue_make_request(q, blk_queue_bio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 747 |  | 
| Alan Stern | 44ec954 | 2007-02-20 11:01:57 -0500 | [diff] [blame] | 748 | 	q->sg_reserved_size = INT_MAX; | 
 | 749 |  | 
| Tejun Heo | b82d4b1 | 2012-04-13 13:11:31 -0700 | [diff] [blame] | 750 | 	/* init elevator */ | 
 | 751 | 	if (elevator_init(q, NULL)) | 
 | 752 | 		return NULL; | 
| Tejun Heo | b82d4b1 | 2012-04-13 13:11:31 -0700 | [diff] [blame] | 753 | 	return q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 754 | } | 
| Mike Snitzer | 5151412 | 2011-11-23 10:59:13 +0100 | [diff] [blame] | 755 | EXPORT_SYMBOL(blk_init_allocated_queue); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 756 |  | 
| Tejun Heo | 09ac46c | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 757 | bool blk_get_queue(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 758 | { | 
| Bart Van Assche | 3f3299d | 2012-11-28 13:42:38 +0100 | [diff] [blame] | 759 | 	if (likely(!blk_queue_dying(q))) { | 
| Tejun Heo | 09ac46c | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 760 | 		__blk_get_queue(q); | 
 | 761 | 		return true; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 762 | 	} | 
 | 763 |  | 
| Tejun Heo | 09ac46c | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 764 | 	return false; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 765 | } | 
| Jens Axboe | d86e0e8 | 2011-05-27 07:44:43 +0200 | [diff] [blame] | 766 | EXPORT_SYMBOL(blk_get_queue); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 767 |  | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 768 | static inline void blk_free_request(struct request_list *rl, struct request *rq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 769 | { | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 770 | 	if (rq->cmd_flags & REQ_ELVPRIV) { | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 771 | 		elv_put_request(rl->q, rq); | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 772 | 		if (rq->elv.icq) | 
| Tejun Heo | 11a3122 | 2012-02-07 07:51:30 +0100 | [diff] [blame] | 773 | 			put_io_context(rq->elv.icq->ioc); | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 774 | 	} | 
 | 775 |  | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 776 | 	mempool_free(rq, rl->rq_pool); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 777 | } | 
 | 778 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 779 | /* | 
 | 780 |  * ioc_batching returns true if the ioc is a valid batching request and | 
 | 781 |  * should be given priority access to a request. | 
 | 782 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 783 | static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 784 | { | 
 | 785 | 	if (!ioc) | 
 | 786 | 		return 0; | 
 | 787 |  | 
 | 788 | 	/* | 
 | 789 | 	 * Make sure the process is able to allocate at least 1 request | 
 | 790 | 	 * even if the batch times out; otherwise we could theoretically | 
 | 791 | 	 * lose wakeups. | 
 | 792 | 	 */ | 
 | 793 | 	return ioc->nr_batch_requests == q->nr_batching || | 
 | 794 | 		(ioc->nr_batch_requests > 0 | 
 | 795 | 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); | 
 | 796 | } | 
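/*
 * Worked example (BLK_BATCH_TIME and BLK_BATCH_REQ values assumed from
 * blk.h): with BLK_BATCH_TIME = HZ/50 and q->nr_batching = BLK_BATCH_REQ
 * = 32, a freshly woken batcher may allocate up to 32 requests within a
 * ~20ms window (at HZ=1000) before losing its priority status.
 */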
 | 797 |  | 
 | 798 | /* | 
 | 799 |  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This | 
 | 800 |  * will cause the process to be a "batcher" on all queues in the system. This | 
 | 801 |  * is the behaviour we want though - once it gets a wakeup it should be given | 
 | 802 |  * a nice run. | 
 | 803 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 804 | static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 805 | { | 
 | 806 | 	if (!ioc || ioc_batching(q, ioc)) | 
 | 807 | 		return; | 
 | 808 |  | 
 | 809 | 	ioc->nr_batch_requests = q->nr_batching; | 
 | 810 | 	ioc->last_waited = jiffies; | 
 | 811 | } | 
 | 812 |  | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 813 | static void __freed_request(struct request_list *rl, int sync) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 814 | { | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 815 | 	struct request_queue *q = rl->q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 816 |  | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 817 | 	/* | 
 | 818 | 	 * bdi isn't aware of blkcg yet.  As all async IOs end up in the | 
 | 819 | 	 * root blkcg anyway, just use root blkcg state. | 
 | 820 | 	 */ | 
 | 821 | 	if (rl == &q->root_rl && | 
 | 822 | 	    rl->count[sync] < queue_congestion_off_threshold(q)) | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 823 | 		blk_clear_queue_congested(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 824 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 825 | 	if (rl->count[sync] + 1 <= q->nr_requests) { | 
 | 826 | 		if (waitqueue_active(&rl->wait[sync])) | 
 | 827 | 			wake_up(&rl->wait[sync]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 828 |  | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 829 | 		blk_clear_rl_full(rl, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 830 | 	} | 
 | 831 | } | 
 | 832 |  | 
 | 833 | /* | 
 | 834 |  * A request has just been released.  Account for it, update the full and | 
 | 835 |  * congestion status, wake up any waiters.   Called under q->queue_lock. | 
 | 836 |  */ | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 837 | static void freed_request(struct request_list *rl, unsigned int flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 838 | { | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 839 | 	struct request_queue *q = rl->q; | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 840 | 	int sync = rw_is_sync(flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 841 |  | 
| Tejun Heo | 8a5ecdd | 2012-06-04 20:40:58 -0700 | [diff] [blame] | 842 | 	q->nr_rqs[sync]--; | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 843 | 	rl->count[sync]--; | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 844 | 	if (flags & REQ_ELVPRIV) | 
| Tejun Heo | 8a5ecdd | 2012-06-04 20:40:58 -0700 | [diff] [blame] | 845 | 		q->nr_rqs_elvpriv--; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 846 |  | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 847 | 	__freed_request(rl, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 849 | 	if (unlikely(rl->starved[sync ^ 1])) | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 850 | 		__freed_request(rl, sync ^ 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 851 | } | 
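/*
 * Note (threshold formulas assumed from blk_queue_congestion_threshold()
 * elsewhere in this file): with nr_requests = 128 the congestion-on
 * threshold is 128 - 128/8 + 1 = 113 and the off threshold is
 * 128 - 128/8 - 128/16 - 1 = 103, so the on/off pair forms a hysteresis
 * band that keeps the bdi congested state from flip-flopping around the
 * limit.
 */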
 | 852 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | /* | 
| Mike Snitzer | 9d5a4e9 | 2011-02-11 11:05:46 +0100 | [diff] [blame] | 854 |  * Determine if elevator data should be initialized when allocating the | 
 | 855 |  * request associated with @bio. | 
 | 856 |  */ | 
 | 857 | static bool blk_rq_should_init_elevator(struct bio *bio) | 
 | 858 | { | 
 | 859 | 	if (!bio) | 
 | 860 | 		return true; | 
 | 861 |  | 
 | 862 | 	/* | 
 | 863 | 	 * Flush requests do not use the elevator so skip initialization. | 
 | 864 | 	 * This allows a request to share the flush and elevator data. | 
 | 865 | 	 */ | 
 | 866 | 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) | 
 | 867 | 		return false; | 
 | 868 |  | 
 | 869 | 	return true; | 
 | 870 | } | 
 | 871 |  | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 872 | /** | 
| Tejun Heo | 852c788 | 2012-03-05 13:15:27 -0800 | [diff] [blame] | 873 |  * rq_ioc - determine io_context for request allocation | 
 | 874 |  * @bio: request being allocated is for this bio (can be %NULL) | 
 | 875 |  * | 
 | 876 |  * Determine io_context to use for request allocation for @bio.  May return | 
 | 877 |  * %NULL if %current->io_context doesn't exist. | 
 | 878 |  */ | 
 | 879 | static struct io_context *rq_ioc(struct bio *bio) | 
 | 880 | { | 
 | 881 | #ifdef CONFIG_BLK_CGROUP | 
 | 882 | 	if (bio && bio->bi_ioc) | 
 | 883 | 		return bio->bi_ioc; | 
 | 884 | #endif | 
 | 885 | 	return current->io_context; | 
 | 886 | } | 
 | 887 |  | 
 | 888 | /** | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 889 |  * __get_request - get a free request | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 890 |  * @rl: request list to allocate from | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 891 |  * @rw_flags: RW and SYNC flags | 
 | 892 |  * @bio: bio to allocate request for (can be %NULL) | 
 | 893 |  * @gfp_mask: allocation mask | 
 | 894 |  * | 
 | 895 |  * Get a free request from @q.  This function may fail under memory | 
 | 896 |  * pressure or if @q is dead. | 
 | 897 |  * | 
 | 898 |  * Must be called with @q->queue_lock held. | 
 | 899 |  * Returns %NULL on failure, with @q->queue_lock held. | 
 | 900 |  * Returns !%NULL on success, with @q->queue_lock *not held*. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 |  */ | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 902 | static struct request *__get_request(struct request_list *rl, int rw_flags, | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 903 | 				     struct bio *bio, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 904 | { | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 905 | 	struct request_queue *q = rl->q; | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 906 | 	struct request *rq; | 
| Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 907 | 	struct elevator_type *et = q->elevator->type; | 
 | 908 | 	struct io_context *ioc = rq_ioc(bio); | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 909 | 	struct io_cq *icq = NULL; | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 910 | 	const bool is_sync = rw_is_sync(rw_flags) != 0; | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 911 | 	int may_queue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 912 |  | 
| Bart Van Assche | 3f3299d | 2012-11-28 13:42:38 +0100 | [diff] [blame] | 913 | 	if (unlikely(blk_queue_dying(q))) | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 914 | 		return NULL; | 
 | 915 |  | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 916 | 	may_queue = elv_may_queue(q, rw_flags); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 917 | 	if (may_queue == ELV_MQUEUE_NO) | 
 | 918 | 		goto rq_starved; | 
 | 919 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 920 | 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { | 
 | 921 | 		if (rl->count[is_sync]+1 >= q->nr_requests) { | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 922 | 			/* | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 923 | 			 * The queue will fill after this allocation, so set | 
 | 924 | 			 * it as full, and mark this process as "batching". | 
 | 925 | 			 * This process will be allowed to complete a batch of | 
 | 926 | 			 * requests, others will be blocked. | 
 | 927 | 			 */ | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 928 | 			if (!blk_rl_full(rl, is_sync)) { | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 929 | 				ioc_set_batching(q, ioc); | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 930 | 				blk_set_rl_full(rl, is_sync); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 931 | 			} else { | 
 | 932 | 				if (may_queue != ELV_MQUEUE_MUST | 
 | 933 | 						&& !ioc_batching(q, ioc)) { | 
 | 934 | 					/* | 
 | 935 | 					 * The queue is full and the allocating | 
 | 936 | 					 * process is not a "batcher", and not | 
 | 937 | 					 * exempted by the IO scheduler | 
 | 938 | 					 */ | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 939 | 					return NULL; | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 940 | 				} | 
 | 941 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 942 | 		} | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 943 | 		/* | 
 | 944 | 		 * bdi isn't aware of blkcg yet.  As all async IOs end up | 
 | 945 | 		 * in the root blkcg anyway, just use root blkcg state. | 
 | 946 | 		 */ | 
 | 947 | 		if (rl == &q->root_rl) | 
 | 948 | 			blk_set_queue_congested(q, is_sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | 	} | 
 | 950 |  | 
| Jens Axboe | 082cf69 | 2005-06-28 16:35:11 +0200 | [diff] [blame] | 951 | 	/* | 
 | 952 | 	 * Only allow batching queuers to allocate up to 50% over the defined | 
 | 953 | 	 * limit of requests; otherwise we could have thousands of requests | 
 | 954 | 	 * allocated with any setting of ->nr_requests. | 
 | 955 | 	 */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 956 | 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 957 | 		return NULL; | 
| Hugh Dickins | fd782a4 | 2005-06-29 15:15:40 +0100 | [diff] [blame] | 958 |  | 
| Tejun Heo | 8a5ecdd | 2012-06-04 20:40:58 -0700 | [diff] [blame] | 959 | 	q->nr_rqs[is_sync]++; | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 960 | 	rl->count[is_sync]++; | 
 | 961 | 	rl->starved[is_sync] = 0; | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 962 |  | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 963 | 	/* | 
 | 964 | 	 * Decide whether the new request will be managed by the elevator.  If | 
 | 965 | 	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will | 
 | 966 | 	 * prevent the current elevator from being destroyed until the new | 
 | 967 | 	 * request is freed.  This guarantees icq's won't be destroyed and | 
 | 968 | 	 * makes creating new ones safe. | 
 | 969 | 	 * | 
 | 970 | 	 * Also, look up the icq while holding queue_lock.  If it doesn't exist, | 
 | 971 | 	 * it will be created after releasing queue_lock. | 
 | 972 | 	 */ | 
| Tejun Heo | d732580 | 2012-03-05 13:14:58 -0800 | [diff] [blame] | 973 | 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 974 | 		rw_flags |= REQ_ELVPRIV; | 
| Tejun Heo | 8a5ecdd | 2012-06-04 20:40:58 -0700 | [diff] [blame] | 975 | 		q->nr_rqs_elvpriv++; | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 976 | 		if (et->icq_cache && ioc) | 
 | 977 | 			icq = ioc_lookup_icq(ioc, q); | 
| Mike Snitzer | 9d5a4e9 | 2011-02-11 11:05:46 +0100 | [diff] [blame] | 978 | 	} | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 979 |  | 
| Jens Axboe | f253b86 | 2010-10-24 22:06:02 +0200 | [diff] [blame] | 980 | 	if (blk_queue_io_stat(q)) | 
 | 981 | 		rw_flags |= REQ_IO_STAT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | 	spin_unlock_irq(q->queue_lock); | 
 | 983 |  | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 984 | 	/* allocate and init request */ | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 985 | 	rq = mempool_alloc(rl->rq_pool, gfp_mask); | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 986 | 	if (!rq) | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 987 | 		goto fail_alloc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 |  | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 989 | 	blk_rq_init(q, rq); | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 990 | 	blk_rq_set_rl(rq, rl); | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 991 | 	rq->cmd_flags = rw_flags | REQ_ALLOCED; | 
 | 992 |  | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 993 | 	/* init elvpriv */ | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 994 | 	if (rw_flags & REQ_ELVPRIV) { | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 995 | 		if (unlikely(et->icq_cache && !icq)) { | 
| Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 996 | 			if (ioc) | 
 | 997 | 				icq = ioc_create_icq(ioc, q, gfp_mask); | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 998 | 			if (!icq) | 
 | 999 | 				goto fail_elvpriv; | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 1000 | 		} | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 1001 |  | 
 | 1002 | 		rq->elv.icq = icq; | 
 | 1003 | 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) | 
 | 1004 | 			goto fail_elvpriv; | 
 | 1005 |  | 
 | 1006 | 		/* @rq->elv.icq holds io_context until @rq is freed */ | 
| Tejun Heo | 29e2b09 | 2012-04-19 16:29:21 -0700 | [diff] [blame] | 1007 | 		if (icq) | 
 | 1008 | 			get_io_context(icq->ioc); | 
 | 1009 | 	} | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 1010 | out: | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 1011 | 	/* | 
 | 1012 | 	 * ioc may be NULL here, and ioc_batching will be false. That's | 
 | 1013 | 	 * OK: if the queue is under the request limit then requests need | 
 | 1014 | 	 * not count toward the nr_batch_requests limit. There will always | 
 | 1015 | 	 * be some limit enforced by BLK_BATCH_TIME. | 
 | 1016 | 	 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | 	if (ioc_batching(q, ioc)) | 
 | 1018 | 		ioc->nr_batch_requests--; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1019 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 1020 | 	trace_block_getrq(q, bio, rw_flags & 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | 	return rq; | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 1022 |  | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 1023 | fail_elvpriv: | 
 | 1024 | 	/* | 
 | 1025 | 	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed | 
 | 1026 | 	 * and may fail indefinitely under memory pressure and thus | 
 | 1027 | 	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will | 
 | 1028 |  * disturb iosched and blkcg but weird is better than dead. | 
 | 1029 | 	 */ | 
 | 1030 | 	printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n", | 
 | 1031 | 			   dev_name(q->backing_dev_info.dev)); | 
 | 1032 |  | 
 | 1033 | 	rq->cmd_flags &= ~REQ_ELVPRIV; | 
 | 1034 | 	rq->elv.icq = NULL; | 
 | 1035 |  | 
 | 1036 | 	spin_lock_irq(q->queue_lock); | 
| Tejun Heo | 8a5ecdd | 2012-06-04 20:40:58 -0700 | [diff] [blame] | 1037 | 	q->nr_rqs_elvpriv--; | 
| Tejun Heo | aaf7c68 | 2012-04-19 16:29:22 -0700 | [diff] [blame] | 1038 | 	spin_unlock_irq(q->queue_lock); | 
 | 1039 | 	goto out; | 
 | 1040 |  | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 1041 | fail_alloc: | 
 | 1042 | 	/* | 
 | 1043 | 	 * Allocation failed presumably due to memory. Undo anything we | 
 | 1044 | 	 * might have messed up. | 
 | 1045 | 	 * | 
 | 1046 | 	 * Allocating task should really be put onto the front of the wait | 
 | 1047 | 	 * queue, but this is pretty rare. | 
 | 1048 | 	 */ | 
 | 1049 | 	spin_lock_irq(q->queue_lock); | 
| Tejun Heo | 5b788ce | 2012-06-04 20:40:59 -0700 | [diff] [blame] | 1050 | 	freed_request(rl, rw_flags); | 
| Tejun Heo | b679281 | 2012-03-05 13:15:23 -0800 | [diff] [blame] | 1051 |  | 
 | 1052 | 	/* | 
 | 1053 | 	 * In the very unlikely event that allocation failed and no | 
 | 1054 | 	 * requests for this direction were pending, mark us starved so that | 
 | 1055 | 	 * freeing of a request in the other direction will notice | 
 | 1056 | 	 * us. Another possible fix would be to split the rq mempool into | 
 | 1057 | 	 * READ and WRITE. | 
 | 1058 | 	 */ | 
 | 1059 | rq_starved: | 
 | 1060 | 	if (unlikely(rl->count[is_sync] == 0)) | 
 | 1061 | 		rl->starved[is_sync] = 1; | 
 | 1062 | 	return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | } | 
 | 1064 |  | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1065 | /** | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1066 |  * get_request - get a free request | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1067 |  * @q: request_queue to allocate request from | 
 | 1068 |  * @rw_flags: RW and SYNC flags | 
 | 1069 |  * @bio: bio to allocate request for (can be %NULL) | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1070 |  * @gfp_mask: allocation mask | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1071 |  * | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1072 |  * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this | 
 | 1073 |  * function keeps retrying under memory pressure and fails iff @q is dead. | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1074 |  * | 
 | 1075 |  * Must be called with @q->queue_lock held. | 
 | 1076 |  * Returns %NULL on failure, with @q->queue_lock held. | 
 | 1077 |  * Returns !%NULL on success, with @q->queue_lock *not held*. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 |  */ | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1079 | static struct request *get_request(struct request_queue *q, int rw_flags, | 
 | 1080 | 				   struct bio *bio, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | { | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 1082 | 	const bool is_sync = rw_is_sync(rw_flags) != 0; | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1083 | 	DEFINE_WAIT(wait); | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1084 | 	struct request_list *rl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1085 | 	struct request *rq; | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1086 |  | 
 | 1087 | 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */ | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1088 | retry: | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1089 | 	rq = __get_request(rl, rw_flags, bio, gfp_mask); | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1090 | 	if (rq) | 
 | 1091 | 		return rq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 |  | 
| Bart Van Assche | 3f3299d | 2012-11-28 13:42:38 +0100 | [diff] [blame] | 1093 | 	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1094 | 		blk_put_rl(rl); | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1095 | 		return NULL; | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1096 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 |  | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1098 | 	/* wait on @rl and retry */ | 
 | 1099 | 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, | 
 | 1100 | 				  TASK_UNINTERRUPTIBLE); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1101 |  | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1102 | 	trace_block_sleeprq(q, bio, rw_flags & 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 |  | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1104 | 	spin_unlock_irq(q->queue_lock); | 
 | 1105 | 	io_schedule(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 |  | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1107 | 	/* | 
 | 1108 | 	 * After sleeping, we become a "batching" process and will be able | 
 | 1109 | 	 * to allocate at least one request, and up to a big batch of them | 
 | 1110 | 	 * for a small period of time.  See ioc_batching, ioc_set_batching. | 
 | 1111 | 	 */ | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1112 | 	ioc_set_batching(q, current->io_context); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1113 |  | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1114 | 	spin_lock_irq(q->queue_lock); | 
 | 1115 | 	finish_wait(&rl->wait[is_sync], &wait); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1116 |  | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1117 | 	goto retry; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | } | 
 | 1119 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1120 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | { | 
 | 1122 | 	struct request *rq; | 
 | 1123 |  | 
 | 1124 | 	BUG_ON(rw != READ && rw != WRITE); | 
 | 1125 |  | 
| Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 1126 | 	/* create ioc upfront */ | 
 | 1127 | 	create_io_context(gfp_mask, q->node); | 
 | 1128 |  | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1129 | 	spin_lock_irq(q->queue_lock); | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1130 | 	rq = get_request(q, rw, NULL, gfp_mask); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1131 | 	if (!rq) | 
 | 1132 | 		spin_unlock_irq(q->queue_lock); | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1133 | 	/* q->queue_lock is unlocked at this point */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 |  | 
 | 1135 | 	return rq; | 
 | 1136 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1137 | EXPORT_SYMBOL(blk_get_request); | 
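/*
 * Illustrative sketch (assumed driver-side usage, not from this file):
 * allocate a request for a driver-private command and execute it
 * synchronously.  blk_execute_rq() is assumed from blk-exec.c.
 */
static int example_issue_special(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
	int err;

	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;	/* driver-private command */
	err = blk_execute_rq(q, NULL, rq, 0);	/* waits for completion */
	blk_put_request(rq);
	return err;
}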
 | 1138 |  | 
 | 1139 | /** | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1140 |  * blk_make_request - given a bio, allocate a corresponding struct request. | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1141 |  * @q: target request queue | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1142 |  * @bio:  The bio describing the memory mappings that will be submitted for IO. | 
 | 1143 |  *        It may be a chained-bio properly constructed by block/bio layer. | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1144 |  * @gfp_mask: gfp flags to be used for memory allocation | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1145 |  * | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1146 |  * blk_make_request is the parallel of generic_make_request for BLOCK_PC | 
 | 1147 |  * type commands, where the struct request needs to be further initialized by | 
 | 1148 |  * the caller. It is passed a &struct bio, which describes the memory info of | 
 | 1149 |  * the I/O transfer. | 
 | 1150 |  * | 
 | 1151 |  * The caller of blk_make_request must make sure that bi_io_vec | 
 | 1152 |  * are set to describe the memory buffers, and that bio_data_dir() will | 
 | 1153 |  * return the needed direction of the request. (And all bio's in the | 
 | 1154 |  * passed bio-chain are properly set accordingly.) | 
 | 1155 |  * | 
 | 1156 |  * If called under non-sleepable conditions, mapped bio buffers must not | 
 | 1157 |  * need bouncing; use the appropriate masked or flagged allocator | 
 | 1158 |  * suitable for the target device. Otherwise the call to blk_queue_bounce | 
 | 1159 |  * will BUG. | 
| Jens Axboe | 53674ac | 2009-05-19 19:52:35 +0200 | [diff] [blame] | 1160 |  * | 
 | 1161 |  * WARNING: When allocating/cloning a bio-chain, careful consideration should be | 
 | 1162 |  * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for | 
 | 1163 |  * anything but the first bio in the chain. Otherwise you risk waiting for IO | 
 | 1164 |  * completion of a bio that hasn't been submitted yet, thus resulting in a | 
 | 1165 |  * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead | 
 | 1166 |  * of bio_alloc(), as that avoids the mempool deadlock. | 
 | 1167 |  * If possible a big IO should be split into smaller parts when allocation | 
 | 1168 |  * fails. Partial allocation should not be an error, or you risk a live-lock. | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1169 |  */ | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1170 | struct request *blk_make_request(struct request_queue *q, struct bio *bio, | 
 | 1171 | 				 gfp_t gfp_mask) | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1172 | { | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1173 | 	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); | 
 | 1174 |  | 
 | 1175 | 	if (unlikely(!rq)) | 
 | 1176 | 		return ERR_PTR(-ENOMEM); | 
 | 1177 |  | 
 | 1178 | 	for_each_bio(bio) { | 
 | 1179 | 		struct bio *bounce_bio = bio; | 
 | 1180 | 		int ret; | 
 | 1181 |  | 
 | 1182 | 		blk_queue_bounce(q, &bounce_bio); | 
 | 1183 | 		ret = blk_rq_append_bio(q, rq, bounce_bio); | 
 | 1184 | 		if (unlikely(ret)) { | 
 | 1185 | 			blk_put_request(rq); | 
 | 1186 | 			return ERR_PTR(ret); | 
 | 1187 | 		} | 
 | 1188 | 	} | 
 | 1189 |  | 
 | 1190 | 	return rq; | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1191 | } | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1192 | EXPORT_SYMBOL(blk_make_request); | 
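/*
 * Illustrative sketch (assumed caller, not from this file): turn a
 * pre-built bio chain into a BLOCK_PC request and perform the extra
 * initialization the kernel-doc above leaves to the caller.
 */
static struct request *example_make_pc_request(struct request_queue *q,
					       struct bio *bio)
{
	struct request *rq = blk_make_request(q, bio, GFP_KERNEL);

	if (IS_ERR(rq))
		return rq;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* caller-side init */
	rq->timeout = 60 * HZ;			/* arbitrary example value */
	return rq;
}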
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1193 |  | 
 | 1194 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 |  * blk_requeue_request - put a request back on queue | 
 | 1196 |  * @q:		request queue where request should be inserted | 
 | 1197 |  * @rq:		request to be inserted | 
 | 1198 |  * | 
 | 1199 |  * Description: | 
 | 1200 |  *    Drivers often keep queueing requests until the hardware cannot accept | 
 | 1201 |  *    more; when that condition happens we need to put the request back | 
 | 1202 |  *    on the queue. Must be called with queue lock held. | 
 | 1203 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1204 | void blk_requeue_request(struct request_queue *q, struct request *rq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 | { | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 1206 | 	blk_delete_timer(rq); | 
 | 1207 | 	blk_clear_rq_complete(rq); | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 1208 | 	trace_block_rq_requeue(q, rq); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1209 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 | 	if (blk_rq_tagged(rq)) | 
 | 1211 | 		blk_queue_end_tag(q, rq); | 
 | 1212 |  | 
| James Bottomley | ba396a6 | 2009-05-27 14:17:08 +0200 | [diff] [blame] | 1213 | 	BUG_ON(blk_queued_rq(rq)); | 
 | 1214 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | 	elv_requeue_request(q, rq); | 
 | 1216 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | EXPORT_SYMBOL(blk_requeue_request); | 
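/*
 * Illustrative sketch (assumed driver pattern): when the hardware can't
 * take a request, push it back and stop the queue until the device
 * signals readiness again (blk_start_queue() from the completion path).
 */
static void example_defer(struct request_queue *q, struct request *rq)
{
	/* caller holds q->queue_lock, as blk_requeue_request() requires */
	blk_requeue_request(q, rq);
	blk_stop_queue(q);
}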
 | 1218 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1219 | static void add_acct_request(struct request_queue *q, struct request *rq, | 
 | 1220 | 			     int where) | 
 | 1221 | { | 
 | 1222 | 	drive_stat_acct(rq, 1); | 
| Jens Axboe | 7eaceac | 2011-03-10 08:52:07 +0100 | [diff] [blame] | 1223 | 	__elv_add_request(q, rq, where); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1224 | } | 
 | 1225 |  | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1226 | static void part_round_stats_single(int cpu, struct hd_struct *part, | 
 | 1227 | 				    unsigned long now) | 
 | 1228 | { | 
 | 1229 | 	if (now == part->stamp) | 
 | 1230 | 		return; | 
 | 1231 |  | 
| Nikanth Karthikesan | 316d315 | 2009-10-06 20:16:55 +0200 | [diff] [blame] | 1232 | 	if (part_in_flight(part)) { | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1233 | 		__part_stat_add(cpu, part, time_in_queue, | 
| Nikanth Karthikesan | 316d315 | 2009-10-06 20:16:55 +0200 | [diff] [blame] | 1234 | 				part_in_flight(part) * (now - part->stamp)); | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1235 | 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp)); | 
 | 1236 | 	} | 
 | 1237 | 	part->stamp = now; | 
 | 1238 | } | 
 | 1239 |  | 
 | 1240 | /** | 
| Randy Dunlap | 496aa8a | 2008-10-16 07:46:23 +0200 | [diff] [blame] | 1241 |  * part_round_stats() - Round off the performance stats on a struct disk_stats. | 
 | 1242 |  * @cpu: cpu number for stats access | 
 | 1243 |  * @part: target partition | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1244 |  * | 
 | 1245 |  * The average IO queue length and utilisation statistics are maintained | 
 | 1246 |  * by observing the current state of the queue length and the amount of | 
 | 1247 |  * time it has been in this state for. | 
 | 1248 |  * | 
 | 1249 |  * Normally, that accounting is done on IO completion, but that can result | 
 | 1250 |  * in more than a second's worth of IO being accounted for within any one | 
 | 1251 |  * second, leading to >100% utilisation.  To deal with that, we call this | 
 | 1252 |  * function to do a round-off before returning the results when reading | 
 | 1253 |  * /proc/diskstats.  This accounts immediately for all queue usage up to | 
 | 1254 |  * the current jiffies and restarts the counters again. | 
 | 1255 |  */ | 
| Tejun Heo | c995905 | 2008-08-25 19:47:21 +0900 | [diff] [blame] | 1256 | void part_round_stats(int cpu, struct hd_struct *part) | 
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1257 | { | 
 | 1258 | 	unsigned long now = jiffies; | 
 | 1259 |  | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1260 | 	if (part->partno) | 
 | 1261 | 		part_round_stats_single(cpu, &part_to_disk(part)->part0, now); | 
 | 1262 | 	part_round_stats_single(cpu, part, now); | 
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1263 | } | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1264 | EXPORT_SYMBOL_GPL(part_round_stats); | 
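/*
 * Illustrative sketch (mirrors the /proc/diskstats pattern described
 * above): round off the counters before sampling them.
 */
static void example_sample_stats(struct hd_struct *part)
{
	int cpu = part_stat_lock();	/* rcu_read_lock() + get_cpu() */

	part_round_stats(cpu, part);
	part_stat_unlock();
}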
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1265 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | /* | 
 | 1267 |  * queue lock must be held | 
 | 1268 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1269 | void __blk_put_request(struct request_queue *q, struct request *req) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | 	if (unlikely(!q)) | 
 | 1272 | 		return; | 
 | 1273 | 	if (unlikely(--req->ref_count)) | 
 | 1274 | 		return; | 
 | 1275 |  | 
| Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 1276 | 	elv_completed_request(q, req); | 
 | 1277 |  | 
| Boaz Harrosh | 1cd96c2 | 2009-03-24 12:35:07 +0100 | [diff] [blame] | 1278 | 	/* this is a bio leak */ | 
 | 1279 | 	WARN_ON(req->bio != NULL); | 
 | 1280 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | 	/* | 
 | 1282 | 	 * Request may not have originated from ll_rw_blk. if not, | 
 | 1283 | 	 * it didn't come out of our reserved rq pools | 
 | 1284 | 	 */ | 
| Jens Axboe | 49171e5 | 2006-08-10 08:59:11 +0200 | [diff] [blame] | 1285 | 	if (req->cmd_flags & REQ_ALLOCED) { | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 1286 | 		unsigned int flags = req->cmd_flags; | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1287 | 		struct request_list *rl = blk_rq_rl(req); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | 		BUG_ON(!list_empty(&req->queuelist)); | 
| Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 1290 | 		BUG_ON(!hlist_unhashed(&req->hash)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 |  | 
| Tejun Heo | a051661 | 2012-06-26 15:05:44 -0700 | [diff] [blame] | 1292 | 		blk_free_request(rl, req); | 
 | 1293 | 		freed_request(rl, flags); | 
 | 1294 | 		blk_put_rl(rl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | 	} | 
 | 1296 | } | 
| Mike Christie | 6e39b69 | 2005-11-11 05:30:24 -0600 | [diff] [blame] | 1297 | EXPORT_SYMBOL_GPL(__blk_put_request); | 
 | 1298 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | void blk_put_request(struct request *req) | 
 | 1300 | { | 
| Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 1301 | 	unsigned long flags; | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1302 | 	struct request_queue *q = req->q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 |  | 
| FUJITA Tomonori | 52a93ba | 2008-07-15 21:21:45 +0200 | [diff] [blame] | 1304 | 	spin_lock_irqsave(q->queue_lock, flags); | 
 | 1305 | 	__blk_put_request(q, req); | 
 | 1306 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | EXPORT_SYMBOL(blk_put_request); | 
 | 1309 |  | 
| Christoph Hellwig | 66ac028 | 2010-06-18 16:59:42 +0200 | [diff] [blame] | 1310 | /** | 
 | 1311 |  * blk_add_request_payload - add a payload to a request | 
 | 1312 |  * @rq: request to update | 
 | 1313 |  * @page: page backing the payload | 
 | 1314 |  * @len: length of the payload. | 
 | 1315 |  * | 
 | 1316 |  * This allows a block driver to later add a payload to an already | 
 | 1317 |  * submitted request.  The driver needs to take care of freeing the payload | 
 | 1318 |  * itself. | 
 | 1319 |  * | 
 | 1320 |  * Note that this is a quite horrible hack and nothing but handling of | 
 | 1321 |  * discard requests should ever use it. | 
 | 1322 |  */ | 
 | 1323 | void blk_add_request_payload(struct request *rq, struct page *page, | 
 | 1324 | 		unsigned int len) | 
 | 1325 | { | 
 | 1326 | 	struct bio *bio = rq->bio; | 
 | 1327 |  | 
 | 1328 | 	bio->bi_io_vec->bv_page = page; | 
 | 1329 | 	bio->bi_io_vec->bv_offset = 0; | 
 | 1330 | 	bio->bi_io_vec->bv_len = len; | 
 | 1331 |  | 
 | 1332 | 	bio->bi_size = len; | 
 | 1333 | 	bio->bi_vcnt = 1; | 
 | 1334 | 	bio->bi_phys_segments = 1; | 
 | 1335 |  | 
 | 1336 | 	rq->__data_len = rq->resid_len = len; | 
 | 1337 | 	rq->nr_phys_segments = 1; | 
 | 1338 | 	rq->buffer = bio_data(bio); | 
 | 1339 | } | 
 | 1340 | EXPORT_SYMBOL_GPL(blk_add_request_payload); | 
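/*
 * Illustrative sketch (loosely modelled on the discard handling this
 * helper was written for; names assumed): attach a zeroed page as the
 * payload of a discard request.  Per the comment above, the driver
 * frees the page itself on completion.
 */
static int example_add_discard_payload(struct request *rq)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return -ENOMEM;

	blk_add_request_payload(rq, page, PAGE_SIZE);
	return 0;
}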
 | 1341 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1342 | static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, | 
 | 1343 | 				   struct bio *bio) | 
 | 1344 | { | 
 | 1345 | 	const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 
 | 1346 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1347 | 	if (!ll_back_merge_fn(q, req, bio)) | 
 | 1348 | 		return false; | 
 | 1349 |  | 
| Tejun Heo | 8c1cf6b | 2013-01-11 13:06:34 -0800 | [diff] [blame] | 1350 | 	trace_block_bio_backmerge(q, req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1351 |  | 
 | 1352 | 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | 
 | 1353 | 		blk_rq_set_mixed_merge(req); | 
 | 1354 |  | 
 | 1355 | 	req->biotail->bi_next = bio; | 
 | 1356 | 	req->biotail = bio; | 
 | 1357 | 	req->__data_len += bio->bi_size; | 
 | 1358 | 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | 
 | 1359 |  | 
 | 1360 | 	drive_stat_acct(req, 0); | 
 | 1361 | 	return true; | 
 | 1362 | } | 
 | 1363 |  | 
 | 1364 | static bool bio_attempt_front_merge(struct request_queue *q, | 
 | 1365 | 				    struct request *req, struct bio *bio) | 
 | 1366 | { | 
 | 1367 | 	const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1368 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1369 | 	if (!ll_front_merge_fn(q, req, bio)) | 
 | 1370 | 		return false; | 
 | 1371 |  | 
| Tejun Heo | 8c1cf6b | 2013-01-11 13:06:34 -0800 | [diff] [blame] | 1372 | 	trace_block_bio_frontmerge(q, req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1373 |  | 
 | 1374 | 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | 
 | 1375 | 		blk_rq_set_mixed_merge(req); | 
 | 1376 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1377 | 	bio->bi_next = req->bio; | 
 | 1378 | 	req->bio = bio; | 
 | 1379 |  | 
 | 1380 | 	/* | 
 | 1381 | 	 * may not be valid. If the low level driver said | 
 | 1382 | 	 * it didn't need a bounce buffer then it better | 
 | 1383 | 	 * not touch req->buffer either... | 
 | 1384 | 	 */ | 
 | 1385 | 	req->buffer = bio_data(bio); | 
 | 1386 | 	req->__sector = bio->bi_sector; | 
 | 1387 | 	req->__data_len += bio->bi_size; | 
 | 1388 | 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | 
 | 1389 |  | 
 | 1390 | 	drive_stat_acct(req, 0); | 
 | 1391 | 	return true; | 
 | 1392 | } | 
 | 1393 |  | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1394 | /** | 
 | 1395 |  * attempt_plug_merge - try to merge with %current's plugged list | 
 | 1396 |  * @q: request_queue new bio is being queued at | 
 | 1397 |  * @bio: new bio being queued | 
 | 1398 |  * @request_count: out parameter for number of traversed plugged requests | 
 | 1399 |  * | 
 | 1400 |  * Determine whether @bio being queued on @q can be merged with a request | 
 | 1401 |  * on %current's plugged list.  Returns %true if merge was successful, | 
 | 1402 |  * otherwise %false. | 
 | 1403 |  * | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1404 |  * Plugging coalesces IOs from the same issuer for the same purpose without | 
 | 1405 |  * going through @q->queue_lock.  As such it's more of an issuing mechanism | 
 | 1406 |  * than scheduling, and the request, while it may have elvpriv data, is not | 
 | 1407 |  * added to the elevator at this point.  In addition, we don't have | 
 | 1408 |  * reliable access to the elevator outside queue lock.  Only check basic | 
 | 1409 |  * merging parameters without querying the elevator. | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1410 |  */ | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1411 | static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, | 
 | 1412 | 			       unsigned int *request_count) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1413 | { | 
 | 1414 | 	struct blk_plug *plug; | 
 | 1415 | 	struct request *rq; | 
 | 1416 | 	bool ret = false; | 
 | 1417 |  | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1418 | 	plug = current->plug; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1419 | 	if (!plug) | 
 | 1420 | 		goto out; | 
| Shaohua Li | 56ebdaf | 2011-08-24 16:04:34 +0200 | [diff] [blame] | 1421 | 	*request_count = 0; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1422 |  | 
 | 1423 | 	list_for_each_entry_reverse(rq, &plug->list, queuelist) { | 
 | 1424 | 		int el_ret; | 
 | 1425 |  | 
| Shaohua Li | 1b2e19f | 2012-04-06 11:37:47 -0600 | [diff] [blame] | 1426 | 		if (rq->q == q) | 
 | 1427 | 			(*request_count)++; | 
| Shaohua Li | 56ebdaf | 2011-08-24 16:04:34 +0200 | [diff] [blame] | 1428 |  | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1429 | 		if (rq->q != q || !blk_rq_merge_ok(rq, bio)) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1430 | 			continue; | 
 | 1431 |  | 
| Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 1432 | 		el_ret = blk_try_merge(rq, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1433 | 		if (el_ret == ELEVATOR_BACK_MERGE) { | 
 | 1434 | 			ret = bio_attempt_back_merge(q, rq, bio); | 
 | 1435 | 			if (ret) | 
 | 1436 | 				break; | 
 | 1437 | 		} else if (el_ret == ELEVATOR_FRONT_MERGE) { | 
 | 1438 | 			ret = bio_attempt_front_merge(q, rq, bio); | 
 | 1439 | 			if (ret) | 
 | 1440 | 				break; | 
 | 1441 | 		} | 
 | 1442 | 	} | 
 | 1443 | out: | 
 | 1444 | 	return ret; | 
 | 1445 | } | 
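/*
 * Illustrative sketch (assumed submitter-side pattern): plugging batches
 * bios on current->plug so that attempt_plug_merge() above can coalesce
 * them without taking queue_lock.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);	/* flushes the plugged list */
}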
 | 1446 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 1447 | void init_request_from_bio(struct request *req, struct bio *bio) | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1448 | { | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 1449 | 	req->cmd_type = REQ_TYPE_FS; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1450 |  | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 1451 | 	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; | 
 | 1452 | 	if (bio->bi_rw & REQ_RAHEAD) | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 1453 | 		req->cmd_flags |= REQ_FAILFAST_MASK; | 
| Jens Axboe | b31dc66 | 2006-06-13 08:26:10 +0200 | [diff] [blame] | 1454 |  | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1455 | 	req->errors = 0; | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1456 | 	req->__sector = bio->bi_sector; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1457 | 	req->ioprio = bio_prio(bio); | 
| NeilBrown | bc1c56f | 2007-08-16 13:31:30 +0200 | [diff] [blame] | 1458 | 	blk_rq_bio_prep(req->q, req, bio); | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1459 | } | 
 | 1460 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1461 | void blk_queue_bio(struct request_queue *q, struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | { | 
| Jiri Slaby | 5e00d1b | 2010-08-12 14:31:06 +0200 | [diff] [blame] | 1463 | 	const bool sync = !!(bio->bi_rw & REQ_SYNC); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1464 | 	struct blk_plug *plug; | 
 | 1465 | 	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; | 
 | 1466 | 	struct request *req; | 
| Shaohua Li | 56ebdaf | 2011-08-24 16:04:34 +0200 | [diff] [blame] | 1467 | 	unsigned int request_count = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1469 | 	/* | 
 | 1470 | 	 * The low level driver can indicate that it wants pages above a | 
 | 1471 | 	 * certain limit bounced to low memory (i.e. for highmem, or even | 
 | 1472 | 	 * ISA DMA in theory). | 
 | 1473 | 	 */ | 
 | 1474 | 	blk_queue_bounce(q, &bio); | 
 | 1475 |  | 
| Darrick J. Wong | ffecfd1 | 2013-02-21 16:42:55 -0800 | [diff] [blame] | 1476 | 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { | 
 | 1477 | 		bio_endio(bio, -EIO); | 
 | 1478 | 		return; | 
 | 1479 | 	} | 
 | 1480 |  | 
| Tejun Heo | 4fed947 | 2010-09-03 11:56:17 +0200 | [diff] [blame] | 1481 | 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1482 | 		spin_lock_irq(q->queue_lock); | 
| Tejun Heo | ae1b153 | 2011-01-25 12:43:54 +0100 | [diff] [blame] | 1483 | 		where = ELEVATOR_INSERT_FLUSH; | 
| Tejun Heo | 28e7d18 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1484 | 		goto get_rq; | 
 | 1485 | 	} | 
 | 1486 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1487 | 	/* | 
 | 1488 | 	 * Check if we can merge with the plugged list before grabbing | 
 | 1489 | 	 * any locks. | 
 | 1490 | 	 */ | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1491 | 	if (attempt_plug_merge(q, bio, &request_count)) | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1492 | 		return; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1493 |  | 
 | 1494 | 	spin_lock_irq(q->queue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 |  | 
 | 1496 | 	el_ret = elv_merge(q, &req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1497 | 	if (el_ret == ELEVATOR_BACK_MERGE) { | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1498 | 		if (bio_attempt_back_merge(q, req, bio)) { | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1499 | 			elv_bio_merged(q, req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1500 | 			if (!attempt_back_merge(q, req)) | 
 | 1501 | 				elv_merged_request(q, req, el_ret); | 
 | 1502 | 			goto out_unlock; | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1503 | 		} | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1504 | 	} else if (el_ret == ELEVATOR_FRONT_MERGE) { | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1505 | 		if (bio_attempt_front_merge(q, req, bio)) { | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1506 | 			elv_bio_merged(q, req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1507 | 			if (!attempt_front_merge(q, req)) | 
 | 1508 | 				elv_merged_request(q, req, el_ret); | 
 | 1509 | 			goto out_unlock; | 
 | 1510 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | 	} | 
 | 1512 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | get_rq: | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1514 | 	/* | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1515 | 	 * This sync check and mask will be re-done in init_request_from_bio(), | 
 | 1516 | 	 * but we need to set it earlier to expose the sync flag to the | 
 | 1517 | 	 * rq allocator and io schedulers. | 
 | 1518 | 	 */ | 
 | 1519 | 	rw_flags = bio_data_dir(bio); | 
 | 1520 | 	if (sync) | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 1521 | 		rw_flags |= REQ_SYNC; | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1522 |  | 
 | 1523 | 	/* | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1524 | 	 * Grab a free request. This might sleep but cannot fail. | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1525 | 	 * Returns with the queue unlocked. | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1526 | 	 */ | 
| Tejun Heo | a06e05e | 2012-06-04 20:40:55 -0700 | [diff] [blame] | 1527 | 	req = get_request(q, rw_flags, bio, GFP_NOIO); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1528 | 	if (unlikely(!req)) { | 
 | 1529 | 		bio_endio(bio, -ENODEV);	/* @q is dead */ | 
 | 1530 | 		goto out_unlock; | 
 | 1531 | 	} | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1532 |  | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1533 | 	/* | 
 | 1534 | 	 * After dropping the lock and possibly sleeping here, our request | 
 | 1535 | 	 * may now be mergeable after it had proven unmergeable (above). | 
 | 1536 | 	 * We don't worry about that case for efficiency. It won't happen | 
 | 1537 | 	 * often, and the elevators are able to handle it. | 
 | 1538 | 	 */ | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1539 | 	init_request_from_bio(req, bio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 |  | 
| Tao Ma | 9562ad9 | 2011-10-24 16:11:30 +0200 | [diff] [blame] | 1541 | 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) | 
| Jens Axboe | 11ccf11 | 2011-07-26 15:01:15 +0200 | [diff] [blame] | 1542 | 		req->cpu = raw_smp_processor_id(); | 
| Tejun Heo | dd83100 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1543 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1544 | 	plug = current->plug; | 
| Jens Axboe | 721a960 | 2011-03-09 11:56:30 +0100 | [diff] [blame] | 1545 | 	if (plug) { | 
| Jens Axboe | dc6d36c | 2011-04-12 10:28:28 +0200 | [diff] [blame] | 1546 | 		/* | 
 | 1547 | 		 * If this is the first request added after a plug, fire | 
 | 1548 | 		 * off a plug trace. If others have been added before, check | 
 | 1549 | 		 * if we have multiple devices in this plug. If so, make a | 
 | 1550 | 		 * note to sort the list before dispatch. | 
 | 1551 | 		 */ | 
 | 1552 | 		if (list_empty(&plug->list)) | 
 | 1553 | 			trace_block_plug(q); | 
| Shaohua Li | 3540d5e | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1554 | 		else { | 
| Shaohua Li | 019ceb7 | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1555 | 			if (request_count >= BLK_MAX_REQUEST_COUNT) { | 
| Shaohua Li | 3540d5e | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1556 | 				blk_flush_plug_list(plug, false); | 
| Shaohua Li | 019ceb7 | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1557 | 				trace_block_plug(q); | 
 | 1558 | 			} | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1559 | 		} | 
| Shaohua Li | a632716 | 2011-08-24 16:04:32 +0200 | [diff] [blame] | 1560 | 		list_add_tail(&req->queuelist, &plug->list); | 
 | 1561 | 		drive_stat_acct(req, 1); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1562 | 	} else { | 
 | 1563 | 		spin_lock_irq(q->queue_lock); | 
 | 1564 | 		add_acct_request(q, req, where); | 
| Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 1565 | 		__blk_run_queue(q); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1566 | out_unlock: | 
 | 1567 | 		spin_unlock_irq(q->queue_lock); | 
 | 1568 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | } | 
| Jens Axboe | c20e8de | 2011-09-12 12:03:37 +0200 | [diff] [blame] | 1570 | EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1571 |  | 
 | 1572 | /* | 
 | 1573 |  * If bio->bi_bdev is a partition, remap the location | 
 | 1574 |  */ | 
 | 1575 | static inline void blk_partition_remap(struct bio *bio) | 
 | 1576 | { | 
 | 1577 | 	struct block_device *bdev = bio->bi_bdev; | 
 | 1578 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1579 | 	if (bio_sectors(bio) && bdev != bdev->bd_contains) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | 		struct hd_struct *p = bdev->bd_part; | 
| Jens Axboe | a362357 | 2005-11-01 09:26:16 +0100 | [diff] [blame] | 1581 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | 		bio->bi_sector += p->start_sect; | 
 | 1583 | 		bio->bi_bdev = bdev->bd_contains; | 
| Alan D. Brunelle | c7149d6 | 2007-08-07 15:30:23 +0200 | [diff] [blame] | 1584 |  | 
| Mike Snitzer | d07335e | 2010-11-16 12:52:38 +0100 | [diff] [blame] | 1585 | 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, | 
 | 1586 | 				      bdev->bd_dev, | 
 | 1587 | 				      bio->bi_sector - p->start_sect); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | 	} | 
 | 1589 | } | 
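/*
 * Worked example (numbers assumed): a bio aimed at sector 100 of a
 * partition whose start_sect is 2048 leaves blk_partition_remap() with
 * bi_sector == 2148 and bi_bdev pointing at the whole-disk bdev
 * (bd_contains); the remap tracepoint records the original location.
 */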
 | 1590 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | static void handle_bad_sector(struct bio *bio) | 
 | 1592 | { | 
 | 1593 | 	char b[BDEVNAME_SIZE]; | 
 | 1594 |  | 
 | 1595 | 	printk(KERN_INFO "attempt to access beyond end of device\n"); | 
 | 1596 | 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | 
 | 1597 | 			bdevname(bio->bi_bdev, b), | 
 | 1598 | 			bio->bi_rw, | 
 | 1599 | 			(unsigned long long)bio->bi_sector + bio_sectors(bio), | 
| Mike Snitzer | 77304d2 | 2010-11-08 14:39:12 +0100 | [diff] [blame] | 1600 | 			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 |  | 
 | 1602 | 	set_bit(BIO_EOF, &bio->bi_flags); | 
 | 1603 | } | 
 | 1604 |  | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1605 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 
 | 1606 |  | 
 | 1607 | static DECLARE_FAULT_ATTR(fail_make_request); | 
 | 1608 |  | 
 | 1609 | static int __init setup_fail_make_request(char *str) | 
 | 1610 | { | 
 | 1611 | 	return setup_fault_attr(&fail_make_request, str); | 
 | 1612 | } | 
 | 1613 | __setup("fail_make_request=", setup_fail_make_request); | 
 | 1614 |  | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1615 | static bool should_fail_request(struct hd_struct *part, unsigned int bytes) | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1616 | { | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1617 | 	return part->make_it_fail && should_fail(&fail_make_request, bytes); | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1618 | } | 
 | 1619 |  | 
 | 1620 | static int __init fail_make_request_debugfs(void) | 
 | 1621 | { | 
| Akinobu Mita | dd48c08 | 2011-08-03 16:21:01 -0700 | [diff] [blame] | 1622 | 	struct dentry *dir = fault_create_debugfs_attr("fail_make_request", | 
 | 1623 | 						NULL, &fail_make_request); | 
 | 1624 |  | 
 | 1625 | 	return IS_ERR(dir) ? PTR_ERR(dir) : 0; | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1626 | } | 
 | 1627 |  | 
 | 1628 | late_initcall(fail_make_request_debugfs); | 
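/*
 * Usage sketch (paths assumed): arm the fault attributes at boot with
 *   fail_make_request=<interval>,<probability>,<space>,<times>
 * or via the debugfs directory created above, then opt a device in:
 *   echo 1 > /sys/block/<dev>/make-it-fail
 */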
 | 1629 |  | 
 | 1630 | #else /* CONFIG_FAIL_MAKE_REQUEST */ | 
 | 1631 |  | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1632 | static inline bool should_fail_request(struct hd_struct *part, | 
 | 1633 | 					unsigned int bytes) | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1634 | { | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1635 | 	return false; | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1636 | } | 
 | 1637 |  | 
 | 1638 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | 
 | 1639 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1640 | /* | 
 | 1641 |  * Check whether this bio extends beyond the end of the device. | 
 | 1642 |  */ | 
 | 1643 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | 
 | 1644 | { | 
 | 1645 | 	sector_t maxsector; | 
 | 1646 |  | 
 | 1647 | 	if (!nr_sectors) | 
 | 1648 | 		return 0; | 
 | 1649 |  | 
 | 1650 | 	/* Test device or partition size, when known. */ | 
| Mike Snitzer | 77304d2 | 2010-11-08 14:39:12 +0100 | [diff] [blame] | 1651 | 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1652 | 	if (maxsector) { | 
 | 1653 | 		sector_t sector = bio->bi_sector; | 
 | 1654 |  | 
 | 1655 | 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | 
 | 1656 | 			/* | 
 | 1657 | 			 * This may well happen - the kernel calls bread() | 
 | 1658 | 			 * without checking the size of the device, e.g., when | 
 | 1659 | 			 * mounting a device. | 
 | 1660 | 			 */ | 
 | 1661 | 			handle_bad_sector(bio); | 
 | 1662 | 			return 1; | 
 | 1663 | 		} | 
 | 1664 | 	} | 
 | 1665 |  | 
 | 1666 | 	return 0; | 
 | 1667 | } | 
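/*
 * For example, on a 1000-sector device an 8-sector bio starting at
 * sector 996 is rejected here: maxsector - nr_sectors = 992 < 996.
 */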
 | 1668 |  | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1669 | static noinline_for_stack bool | 
 | 1670 | generic_make_request_checks(struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | { | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1672 | 	struct request_queue *q; | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1673 | 	int nr_sectors = bio_sectors(bio); | 
| Jens Axboe | 51fd77b | 2007-11-02 08:49:08 +0100 | [diff] [blame] | 1674 | 	int err = -EIO; | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1675 | 	char b[BDEVNAME_SIZE]; | 
 | 1676 | 	struct hd_struct *part; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 |  | 
 | 1678 | 	might_sleep(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1679 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1680 | 	if (bio_check_eod(bio, nr_sectors)) | 
 | 1681 | 		goto end_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1682 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1683 | 	q = bdev_get_queue(bio->bi_bdev); | 
 | 1684 | 	if (unlikely(!q)) { | 
 | 1685 | 		printk(KERN_ERR | 
 | 1686 | 		       "generic_make_request: Trying to access " | 
 | 1687 | 			"nonexistent block-device %s (%Lu)\n", | 
 | 1688 | 			bdevname(bio->bi_bdev, b), | 
 | 1689 | 			(long long) bio->bi_sector); | 
 | 1690 | 		goto end_io; | 
 | 1691 | 	} | 
 | 1692 |  | 
| Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 1693 | 	if (likely(bio_is_rw(bio) && | 
 | 1694 | 		   nr_sectors > queue_max_hw_sectors(q))) { | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1695 | 		printk(KERN_ERR "bio too big device %s (%u > %u)\n", | 
 | 1696 | 		       bdevname(bio->bi_bdev, b), | 
 | 1697 | 		       bio_sectors(bio), | 
 | 1698 | 		       queue_max_hw_sectors(q)); | 
 | 1699 | 		goto end_io; | 
 | 1700 | 	} | 
 | 1701 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1702 | 	part = bio->bi_bdev->bd_part; | 
 | 1703 | 	if (should_fail_request(part, bio->bi_size) || | 
 | 1704 | 	    should_fail_request(&part_to_disk(part)->part0, | 
 | 1705 | 				bio->bi_size)) | 
 | 1706 | 		goto end_io; | 
 | 1707 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1708 | 	/* | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1709 | 	 * If this device has partitions, remap block n | 
 | 1710 | 	 * of partition p to block n+start(p) of the disk. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | 	 */ | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1712 | 	blk_partition_remap(bio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1713 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1714 | 	if (bio_check_eod(bio, nr_sectors)) | 
 | 1715 | 		goto end_io; | 
 | 1716 |  | 
 | 1717 | 	/* | 
 | 1718 | 	 * Filter flush bios early so that make_request based | 
 | 1719 | 	 * drivers without flush support don't have to worry | 
 | 1720 | 	 * about them. | 
 | 1721 | 	 */ | 
 | 1722 | 	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { | 
 | 1723 | 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); | 
 | 1724 | 		if (!nr_sectors) { | 
 | 1725 | 			err = 0; | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1726 | 			goto end_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | 		} | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1728 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1729 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1730 | 	if ((bio->bi_rw & REQ_DISCARD) && | 
 | 1731 | 	    (!blk_queue_discard(q) || | 
| Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 1732 | 	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1733 | 		err = -EOPNOTSUPP; | 
 | 1734 | 		goto end_io; | 
 | 1735 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1736 |  | 
| Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1737 | 	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | 		err = -EOPNOTSUPP; | 
 | 1739 | 		goto end_io; | 
 | 1740 | 	} | 
 | 1741 |  | 
| Tejun Heo | 7f4b35d | 2012-06-04 20:40:56 -0700 | [diff] [blame] | 1742 | 	/* | 
 | 1743 | 	 * Various block parts want %current->io_context, and lazy ioc | 
 | 1744 | 	 * allocation ends up trading a lot of pain for a small amount of | 
 | 1745 | 	 * memory.  Just allocate it upfront.  This may fail and the block | 
 | 1746 | 	 * layer knows how to live with it. | 
 | 1747 | 	 */ | 
 | 1748 | 	create_io_context(GFP_ATOMIC, q->node); | 
 | 1749 |  | 
| Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1750 | 	if (blk_throtl_bio(q, bio)) | 
 | 1751 | 		return false;	/* throttled, will be resubmitted later */ | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1752 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1753 | 	trace_block_bio_queue(q, bio); | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1754 | 	return true; | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1755 |  | 
 | 1756 | end_io: | 
 | 1757 | 	bio_endio(bio, err); | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1758 | 	return false; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | } | 
 | 1760 |  | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1761 | /** | 
 | 1762 |  * generic_make_request - hand a buffer to its device driver for I/O | 
 | 1763 |  * @bio:  The bio describing the location in memory and on the device. | 
 | 1764 |  * | 
 | 1765 |  * generic_make_request() is used to make I/O requests of block | 
 | 1766 |  * devices. It is passed a &struct bio, which describes the I/O that needs | 
 | 1767 |  * to be done. | 
 | 1768 |  * | 
 | 1769 |  * generic_make_request() does not return any status.  The | 
 | 1770 |  * success/failure status of the request, along with notification of | 
 | 1771 |  * completion, is delivered asynchronously through the bio->bi_end_io | 
 | 1772 |  * function described (one day) elsewhere. | 
 | 1773 |  * | 
 | 1774 |  * The caller of generic_make_request must make sure that bi_io_vec | 
 | 1775 |  * is set to describe the memory buffer, and that bi_bdev and bi_sector are | 
 | 1776 |  * set to describe the device address, and that the | 
 | 1777 |  * bi_end_io and optionally bi_private are set to describe how | 
 | 1778 |  * completion notification should be signaled. | 
 | 1779 |  * | 
 | 1780 |  * generic_make_request and the drivers it calls may use bi_next if this | 
 | 1781 |  * bio happens to be merged with someone else, and may resubmit the bio to | 
 | 1782 |  * a lower device by calling into generic_make_request recursively, which | 
 | 1783 |  * means the bio should NOT be touched after the call to ->make_request_fn. | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1784 |  */ | 
 | 1785 | void generic_make_request(struct bio *bio) | 
 | 1786 | { | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1787 | 	struct bio_list bio_list_on_stack; | 
 | 1788 |  | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1789 | 	if (!generic_make_request_checks(bio)) | 
 | 1790 | 		return; | 
 | 1791 |  | 
 | 1792 | 	/* | 
 | 1793 | 	 * We only want one ->make_request_fn to be active at a time, else | 
 | 1794 | 	 * stack usage with stacked devices could be a problem.  So use | 
 | 1795 | 	 * current->bio_list to keep a list of requests submitted by a | 
 | 1796 | 	 * make_request_fn function.  current->bio_list is also used as a | 
 | 1797 | 	 * flag to say if generic_make_request is currently active in this | 
 | 1798 | 	 * task or not.  If it is NULL, then no make_request is active.  If | 
 | 1799 | 	 * it is non-NULL, then a make_request is active, and new requests | 
 | 1800 | 	 * should be added at the tail. | 
 | 1801 | 	 */ | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1802 | 	if (current->bio_list) { | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1803 | 		bio_list_add(current->bio_list, bio); | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1804 | 		return; | 
 | 1805 | 	} | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1806 |  | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1807 | 	/* The following loop may be a bit non-obvious, and so deserves some | 
 | 1808 | 	 * explanation. | 
 | 1809 | 	 * Before entering the loop, bio->bi_next is NULL (as all callers | 
 | 1810 | 	 * ensure that) so we have a list with a single bio. | 
 | 1811 | 	 * We pretend that we have just taken it off a longer list, so | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1812 | 	 * we assign bio_list to a pointer to the bio_list_on_stack, | 
 | 1813 | 	 * thus initialising the bio_list of new bios to be | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1814 | 	 * added.  ->make_request() may indeed add some more bios | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1815 | 	 * through a recursive call to generic_make_request.  If it | 
 | 1816 | 	 * did, we find a non-NULL value in bio_list and re-enter the loop | 
 | 1817 | 	 * from the top.  In this case we really did just take the bio | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1818 | 	 * off the top of the list (no pretending) and so remove it from | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1819 | 	 * bio_list, and call into ->make_request() again. | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1820 | 	 */ | 
 | 1821 | 	BUG_ON(bio->bi_next); | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1822 | 	bio_list_init(&bio_list_on_stack); | 
 | 1823 | 	current->bio_list = &bio_list_on_stack; | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1824 | 	do { | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1825 | 		struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 
 | 1826 |  | 
 | 1827 | 		q->make_request_fn(q, bio); | 
 | 1828 |  | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1829 | 		bio = bio_list_pop(current->bio_list); | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1830 | 	} while (bio); | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1831 | 	current->bio_list = NULL; /* deactivate */ | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1832 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1833 | EXPORT_SYMBOL(generic_make_request); | 
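/*
 * A minimal sketch of a generic_make_request() caller that waits
 * synchronously for completion.  The function names and single-page
 * setup are assumptions for illustration only; a real submitter would
 * carry its own private state in bi_private.
 */
static void example_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);
}

static int example_read_one_page(struct block_device *bdev,
				 struct page *page, sector_t sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int err;

	bio->bi_bdev	= bdev;
	bio->bi_sector	= sector;
	bio->bi_end_io	= example_end_io;
	bio->bi_private	= &done;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	generic_make_request(bio);	/* READ: bi_rw stays zero */
	wait_for_completion(&done);

	err = bio_flagged(bio, BIO_UPTODATE) ? 0 : -EIO;
	bio_put(bio);
	return err;
}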
 | 1834 |  | 
 | 1835 | /** | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1836 |  * submit_bio - submit a bio to the block device layer for I/O | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 |  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | 
 | 1838 |  * @bio: The &struct bio which describes the I/O | 
 | 1839 |  * | 
 | 1840 |  * submit_bio() is very similar in purpose to generic_make_request(), and | 
 | 1841 |  * uses that function to do most of the work. Both are fairly rough | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1842 |  * interfaces; @bio must be set up and ready for I/O. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 |  * | 
 | 1844 |  */ | 
 | 1845 | void submit_bio(int rw, struct bio *bio) | 
 | 1846 | { | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 1847 | 	bio->bi_rw |= rw; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1849 | 	/* | 
 | 1850 | 	 * If it's a regular read/write or a barrier with data attached, | 
 | 1851 | 	 * go through the normal accounting stuff before submission. | 
 | 1852 | 	 */ | 
| Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 1853 | 	if (bio_has_data(bio)) { | 
| Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1854 | 		unsigned int count; | 
 | 1855 |  | 
 | 1856 | 		if (unlikely(rw & REQ_WRITE_SAME)) | 
 | 1857 | 			count = bdev_logical_block_size(bio->bi_bdev) >> 9; | 
 | 1858 | 		else | 
 | 1859 | 			count = bio_sectors(bio); | 
 | 1860 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1861 | 		if (rw & WRITE) { | 
 | 1862 | 			count_vm_events(PGPGOUT, count); | 
 | 1863 | 		} else { | 
 | 1864 | 			task_io_account_read(bio->bi_size); | 
 | 1865 | 			count_vm_events(PGPGIN, count); | 
 | 1866 | 		} | 
 | 1867 |  | 
 | 1868 | 		if (unlikely(block_dump)) { | 
 | 1869 | 			char b[BDEVNAME_SIZE]; | 
| San Mehat | 8dcbdc7 | 2010-09-14 08:48:01 +0200 | [diff] [blame] | 1870 | 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 1871 | 			current->comm, task_pid_nr(current), | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1872 | 				(rw & WRITE) ? "WRITE" : "READ", | 
 | 1873 | 				(unsigned long long)bio->bi_sector, | 
| San Mehat | 8dcbdc7 | 2010-09-14 08:48:01 +0200 | [diff] [blame] | 1874 | 				bdevname(bio->bi_bdev, b), | 
 | 1875 | 				count); | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1876 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1877 | 	} | 
 | 1878 |  | 
 | 1879 | 	generic_make_request(bio); | 
 | 1880 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1881 | EXPORT_SYMBOL(submit_bio); | 
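/*
 * A sketch of how @rw is typically composed by submit_bio() callers:
 * a data direction plus REQ_* modifier flags (bio setup as above is
 * assumed).
 */
static void example_submit_stable_write(struct bio *bio)
{
	/* synchronous write whose data must reach stable storage */
	submit_bio(WRITE | REQ_SYNC | REQ_FUA, bio);
}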
 | 1882 |  | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1883 | /** | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1884 |  * blk_rq_check_limits - Helper function to check a request against the queue limits | 
 | 1885 |  * @q:  the queue | 
 | 1886 |  * @rq: the request being checked | 
 | 1887 |  * | 
 | 1888 |  * Description: | 
 | 1889 |  *    @rq may have been made based on weaker limitations of upper-level queues | 
 | 1890 |  *    in request stacking drivers, and it may violate the limitation of @q. | 
 | 1891 |  *    Since the block layer and the underlying device driver trust @rq | 
 | 1892 |  *    after it is inserted to @q, it should be checked against @q before | 
 | 1893 |  *    the insertion using this generic function. | 
 | 1894 |  * | 
 | 1895 |  *    This function should also be useful for request stacking drivers | 
| Stefan Weil | eef35c2 | 2010-08-06 21:11:15 +0200 | [diff] [blame] | 1896 |  *    in some cases below, so export this function. | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1897 |  *    Request stacking drivers like request-based dm may change the queue | 
 | 1898 |  *    limits while requests are in the queue (e.g. dm's table swapping). | 
 | 1899 |  *    Such request stacking drivers should check those requests against | 
 | 1900 |  *    the new queue limits again when they dispatch those requests, | 
 | 1901 |  *    although such checks are also done against the old queue limits | 
 | 1902 |  *    when submitting requests. | 
 | 1903 |  */ | 
 | 1904 | int blk_rq_check_limits(struct request_queue *q, struct request *rq) | 
 | 1905 | { | 
| Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 1906 | 	if (!rq_mergeable(rq)) | 
| Mike Snitzer | 3383977 | 2010-08-08 12:11:33 -0400 | [diff] [blame] | 1907 | 		return 0; | 
 | 1908 |  | 
| Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 1909 | 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1910 | 		printk(KERN_ERR "%s: over max size limit.\n", __func__); | 
 | 1911 | 		return -EIO; | 
 | 1912 | 	} | 
 | 1913 |  | 
 | 1914 | 	/* | 
 | 1915 | 	 * queue's settings related to segment counting like q->bounce_pfn | 
 | 1916 | 	 * may differ from that of other stacking queues. | 
 | 1917 | 	 * Recalculate it to check the request correctly on this queue's | 
 | 1918 | 	 * limitation. | 
 | 1919 | 	 */ | 
 | 1920 | 	blk_recalc_rq_segments(rq); | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1921 | 	if (rq->nr_phys_segments > queue_max_segments(q)) { | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1922 | 		printk(KERN_ERR "%s: over max segments limit.\n", __func__); | 
 | 1923 | 		return -EIO; | 
 | 1924 | 	} | 
 | 1925 |  | 
 | 1926 | 	return 0; | 
 | 1927 | } | 
 | 1928 | EXPORT_SYMBOL_GPL(blk_rq_check_limits); | 
 | 1929 |  | 
 | 1930 | /** | 
 | 1931 |  * blk_insert_cloned_request - Helper for stacking drivers to submit a request | 
 | 1932 |  * @q:  the queue to submit the request | 
 | 1933 |  * @rq: the request being queued | 
 | 1934 |  */ | 
 | 1935 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 
 | 1936 | { | 
 | 1937 | 	unsigned long flags; | 
| Jeff Moyer | 4853aba | 2011-08-15 21:37:25 +0200 | [diff] [blame] | 1938 | 	int where = ELEVATOR_INSERT_BACK; | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1939 |  | 
 | 1940 | 	if (blk_rq_check_limits(q, rq)) | 
 | 1941 | 		return -EIO; | 
 | 1942 |  | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1943 | 	if (rq->rq_disk && | 
 | 1944 | 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1945 | 		return -EIO; | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1946 |  | 
 | 1947 | 	spin_lock_irqsave(q->queue_lock, flags); | 
| Bart Van Assche | 3f3299d | 2012-11-28 13:42:38 +0100 | [diff] [blame] | 1948 | 	if (unlikely(blk_queue_dying(q))) { | 
| Tejun Heo | 8ba6143 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 1949 | 		spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 1950 | 		return -ENODEV; | 
 | 1951 | 	} | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1952 |  | 
 | 1953 | 	/* | 
 | 1954 | 	 * The request being submitted must be dequeued before calling this | 
 | 1955 | 	 * function because it will be linked to another request_queue. | 
 | 1956 | 	 */ | 
 | 1957 | 	BUG_ON(blk_queued_rq(rq)); | 
 | 1958 |  | 
| Jeff Moyer | 4853aba | 2011-08-15 21:37:25 +0200 | [diff] [blame] | 1959 | 	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) | 
 | 1960 | 		where = ELEVATOR_INSERT_FLUSH; | 
 | 1961 |  | 
 | 1962 | 	add_acct_request(q, rq, where); | 
| Jeff Moyer | e67b77c | 2011-10-17 12:57:23 +0200 | [diff] [blame] | 1963 | 	if (where == ELEVATOR_INSERT_FLUSH) | 
 | 1964 | 		__blk_run_queue(q); | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1965 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 1966 |  | 
 | 1967 | 	return 0; | 
 | 1968 | } | 
 | 1969 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 
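/*
 * A sketch of dispatch in a request stacking driver: @clone was
 * prepared against the top-level queue, so it is re-checked against
 * the bottom device's queue on insertion.  Requeueing the original
 * request on failure is assumed to be handled by the caller.
 */
static int example_dispatch_clone(struct request_queue *bottom_q,
				  struct request *clone)
{
	/*
	 * -EIO: @clone violates @bottom_q's limits (checked via
	 * blk_rq_check_limits()); -ENODEV: @bottom_q is dying.
	 */
	return blk_insert_cloned_request(bottom_q, clone);
}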
 | 1970 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1971 | /** | 
 | 1972 |  * blk_rq_err_bytes - determine number of bytes till the next failure boundary | 
 | 1973 |  * @rq: request to examine | 
 | 1974 |  * | 
 | 1975 |  * Description: | 
 | 1976 |  *     A request could be a merge of IOs which require different failure | 
 | 1977 |  *     handling.  This function determines the number of bytes which | 
 | 1978 |  *     can be failed from the beginning of the request without | 
 | 1979 |  *     crossing into an area which needs to be retried further. | 
 | 1980 |  * | 
 | 1981 |  * Return: | 
 | 1982 |  *     The number of bytes to fail. | 
 | 1983 |  * | 
 | 1984 |  * Context: | 
 | 1985 |  *     queue_lock must be held. | 
 | 1986 |  */ | 
 | 1987 | unsigned int blk_rq_err_bytes(const struct request *rq) | 
 | 1988 | { | 
 | 1989 | 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; | 
 | 1990 | 	unsigned int bytes = 0; | 
 | 1991 | 	struct bio *bio; | 
 | 1992 |  | 
 | 1993 | 	if (!(rq->cmd_flags & REQ_MIXED_MERGE)) | 
 | 1994 | 		return blk_rq_bytes(rq); | 
 | 1995 |  | 
 | 1996 | 	/* | 
 | 1997 | 	 * Currently the only 'mixing' which can happen is between | 
 | 1998 | 	 * different fastfail types.  We can safely fail portions | 
 | 1999 | 	 * which have all the failfast bits that the first one has - | 
 | 2000 | 	 * the ones which are at least as eager to fail as the first | 
 | 2001 | 	 * one. | 
 | 2002 | 	 */ | 
 | 2003 | 	for (bio = rq->bio; bio; bio = bio->bi_next) { | 
 | 2004 | 		if ((bio->bi_rw & ff) != ff) | 
 | 2005 | 			break; | 
 | 2006 | 		bytes += bio->bi_size; | 
 | 2007 | 	} | 
 | 2008 |  | 
 | 2009 | 	/* this could lead to an infinite loop */ | 
 | 2010 | 	BUG_ON(blk_rq_bytes(rq) && !bytes); | 
 | 2011 | 	return bytes; | 
 | 2012 | } | 
 | 2013 | EXPORT_SYMBOL_GPL(blk_rq_err_bytes); | 
 | 2014 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2015 | static void blk_account_io_completion(struct request *req, unsigned int bytes) | 
 | 2016 | { | 
| Jens Axboe | c2553b5 | 2009-04-24 08:10:11 +0200 | [diff] [blame] | 2017 | 	if (blk_do_io_stat(req)) { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2018 | 		const int rw = rq_data_dir(req); | 
 | 2019 | 		struct hd_struct *part; | 
 | 2020 | 		int cpu; | 
 | 2021 |  | 
 | 2022 | 		cpu = part_stat_lock(); | 
| Jerome Marchand | 09e099d | 2011-01-05 16:57:38 +0100 | [diff] [blame] | 2023 | 		part = req->part; | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2024 | 		part_stat_add(cpu, part, sectors[rw], bytes >> 9); | 
 | 2025 | 		part_stat_unlock(); | 
 | 2026 | 	} | 
 | 2027 | } | 
 | 2028 |  | 
 | 2029 | static void blk_account_io_done(struct request *req) | 
 | 2030 | { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2031 | 	/* | 
| Tejun Heo | dd4c133 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 2032 | 	 * Account IO completion.  flush_rq isn't accounted as a | 
 | 2033 |  * normal IO either on queueing or on completion.  Accounting the | 
 | 2034 | 	 * containing request is enough. | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2035 | 	 */ | 
| Tejun Heo | 414b4ff | 2011-01-25 12:43:49 +0100 | [diff] [blame] | 2036 | 	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2037 | 		unsigned long duration = jiffies - req->start_time; | 
 | 2038 | 		const int rw = rq_data_dir(req); | 
 | 2039 | 		struct hd_struct *part; | 
 | 2040 | 		int cpu; | 
 | 2041 |  | 
 | 2042 | 		cpu = part_stat_lock(); | 
| Jerome Marchand | 09e099d | 2011-01-05 16:57:38 +0100 | [diff] [blame] | 2043 | 		part = req->part; | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2044 |  | 
 | 2045 | 		part_stat_inc(cpu, part, ios[rw]); | 
 | 2046 | 		part_stat_add(cpu, part, ticks[rw], duration); | 
 | 2047 | 		part_round_stats(cpu, part); | 
| Nikanth Karthikesan | 316d315 | 2009-10-06 20:16:55 +0200 | [diff] [blame] | 2048 | 		part_dec_in_flight(part, rw); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2049 |  | 
| Jens Axboe | 6c23a96 | 2011-01-07 08:43:37 +0100 | [diff] [blame] | 2050 | 		hd_struct_put(part); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2051 | 		part_stat_unlock(); | 
 | 2052 | 	} | 
 | 2053 | } | 
 | 2054 |  | 
| Tejun Heo | 53a0880 | 2008-12-03 12:41:26 +0100 | [diff] [blame] | 2055 | /** | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2056 |  * blk_peek_request - peek at the top of a request queue | 
 | 2057 |  * @q: request queue to peek at | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2058 |  * | 
 | 2059 |  * Description: | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2060 |  *     Return the request at the top of @q.  The returned request | 
 | 2061 |  *     should be started using blk_start_request() before LLD starts | 
 | 2062 |  *     processing it. | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2063 |  * | 
 | 2064 |  * Return: | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2065 |  *     Pointer to the request at the top of @q if available.  Null | 
 | 2066 |  *     otherwise. | 
 | 2067 |  * | 
 | 2068 |  * Context: | 
 | 2069 |  *     queue_lock must be held. | 
 | 2070 |  */ | 
 | 2071 | struct request *blk_peek_request(struct request_queue *q) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2072 | { | 
 | 2073 | 	struct request *rq; | 
 | 2074 | 	int ret; | 
 | 2075 |  | 
 | 2076 | 	while ((rq = __elv_next_request(q)) != NULL) { | 
 | 2077 | 		if (!(rq->cmd_flags & REQ_STARTED)) { | 
 | 2078 | 			/* | 
 | 2079 | 			 * This is the first time the device driver | 
 | 2080 | 			 * sees this request (possibly after | 
 | 2081 | 			 * requeueing).  Notify IO scheduler. | 
 | 2082 | 			 */ | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2083 | 			if (rq->cmd_flags & REQ_SORTED) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2084 | 				elv_activate_rq(q, rq); | 
 | 2085 |  | 
 | 2086 | 			/* | 
 | 2087 | 			 * Just mark it as started even if we don't start | 
 | 2088 | 			 * it; a request that has been delayed should | 
 | 2089 | 			 * not be passed by new incoming requests. | 
 | 2090 | 			 */ | 
 | 2091 | 			rq->cmd_flags |= REQ_STARTED; | 
 | 2092 | 			trace_block_rq_issue(q, rq); | 
 | 2093 | 		} | 
 | 2094 |  | 
 | 2095 | 		if (!q->boundary_rq || q->boundary_rq == rq) { | 
 | 2096 | 			q->end_sector = rq_end_sector(rq); | 
 | 2097 | 			q->boundary_rq = NULL; | 
 | 2098 | 		} | 
 | 2099 |  | 
 | 2100 | 		if (rq->cmd_flags & REQ_DONTPREP) | 
 | 2101 | 			break; | 
 | 2102 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2103 | 		if (q->dma_drain_size && blk_rq_bytes(rq)) { | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2104 | 			/* | 
 | 2105 | 			 * make sure space for the drain appears; we | 
 | 2106 | 			 * know we can do this because max_hw_segments | 
 | 2107 | 			 * has been adjusted to be one fewer than the | 
 | 2108 | 			 * device can handle | 
 | 2109 | 			 */ | 
 | 2110 | 			rq->nr_phys_segments++; | 
 | 2111 | 		} | 
 | 2112 |  | 
 | 2113 | 		if (!q->prep_rq_fn) | 
 | 2114 | 			break; | 
 | 2115 |  | 
 | 2116 | 		ret = q->prep_rq_fn(q, rq); | 
 | 2117 | 		if (ret == BLKPREP_OK) { | 
 | 2118 | 			break; | 
 | 2119 | 		} else if (ret == BLKPREP_DEFER) { | 
 | 2120 | 			/* | 
 | 2121 | 			 * the request may have been (partially) prepped. | 
 | 2122 | 			 * we need to keep this request in the front to | 
 | 2123 | 			 * avoid resource deadlock.  REQ_STARTED will | 
 | 2124 | 			 * prevent other fs requests from passing this one. | 
 | 2125 | 			 */ | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2126 | 			if (q->dma_drain_size && blk_rq_bytes(rq) && | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2127 | 			    !(rq->cmd_flags & REQ_DONTPREP)) { | 
 | 2128 | 				/* | 
 | 2129 | 				 * remove the space for the drain we added | 
 | 2130 | 				 * so that we don't add it again | 
 | 2131 | 				 */ | 
 | 2132 | 				--rq->nr_phys_segments; | 
 | 2133 | 			} | 
 | 2134 |  | 
 | 2135 | 			rq = NULL; | 
 | 2136 | 			break; | 
 | 2137 | 		} else if (ret == BLKPREP_KILL) { | 
 | 2138 | 			rq->cmd_flags |= REQ_QUIET; | 
| James Bottomley | c143dc9 | 2009-05-30 06:43:49 +0200 | [diff] [blame] | 2139 | 			/* | 
 | 2140 | 			 * Mark this request as started so we don't trigger | 
 | 2141 | 			 * any debug logic in the end I/O path. | 
 | 2142 | 			 */ | 
 | 2143 | 			blk_start_request(rq); | 
| Tejun Heo | 40cbbb7 | 2009-04-23 11:05:19 +0900 | [diff] [blame] | 2144 | 			__blk_end_request_all(rq, -EIO); | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2145 | 		} else { | 
 | 2146 | 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); | 
 | 2147 | 			break; | 
 | 2148 | 		} | 
 | 2149 | 	} | 
 | 2150 |  | 
 | 2151 | 	return rq; | 
 | 2152 | } | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2153 | EXPORT_SYMBOL(blk_peek_request); | 
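/*
 * A sketch of the peek/start split: an LLD that may be short on
 * internal resources can peek first and dequeue only once it is sure
 * it can take the request.  Runs with the queue lock held, as a
 * request_fn always does; the example_hw_*() helpers are hypothetical
 * stand-ins for driver-private code.
 */
static bool example_hw_busy(void);
static void example_hw_queue(struct request *rq);

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (example_hw_busy())
			break;		/* leave rq at the head of @q */
		blk_start_request(rq);	/* dequeue and arm the timeout */
		example_hw_queue(rq);
	}
}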
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2154 |  | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2155 | void blk_dequeue_request(struct request *rq) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2156 | { | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2157 | 	struct request_queue *q = rq->q; | 
 | 2158 |  | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2159 | 	BUG_ON(list_empty(&rq->queuelist)); | 
 | 2160 | 	BUG_ON(ELV_ON_HASH(rq)); | 
 | 2161 |  | 
 | 2162 | 	list_del_init(&rq->queuelist); | 
 | 2163 |  | 
 | 2164 | 	/* | 
 | 2165 | 	 * The time frame between a request being removed from the lists | 
 | 2166 | 	 * and when it is freed is accounted as I/O in progress on | 
 | 2167 | 	 * the driver side. | 
 | 2168 | 	 */ | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 2169 | 	if (blk_account_rq(rq)) { | 
| Jens Axboe | 0a7ae2f | 2009-05-20 08:54:31 +0200 | [diff] [blame] | 2170 | 		q->in_flight[rq_is_sync(rq)]++; | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 2171 | 		set_io_start_time_ns(rq); | 
 | 2172 | 	} | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2173 | } | 
 | 2174 |  | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2175 | /** | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2176 |  * blk_start_request - start request processing on the driver | 
 | 2177 |  * @req: request to dequeue | 
 | 2178 |  * | 
 | 2179 |  * Description: | 
 | 2180 |  *     Dequeue @req and start timeout timer on it.  This hands off the | 
 | 2181 |  *     request to the driver. | 
 | 2182 |  * | 
 | 2183 |  *     Block internal functions which don't want to start timer should | 
 | 2184 |  *     call blk_dequeue_request(). | 
 | 2185 |  * | 
 | 2186 |  * Context: | 
 | 2187 |  *     queue_lock must be held. | 
 | 2188 |  */ | 
 | 2189 | void blk_start_request(struct request *req) | 
 | 2190 | { | 
 | 2191 | 	blk_dequeue_request(req); | 
 | 2192 |  | 
 | 2193 | 	/* | 
| Tejun Heo | 5f49f63 | 2009-05-19 18:33:05 +0900 | [diff] [blame] | 2194 | 	 * We are now handing the request to the hardware, initialize | 
 | 2195 | 	 * resid_len to full count and add the timeout handler. | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2196 | 	 */ | 
| Tejun Heo | 5f49f63 | 2009-05-19 18:33:05 +0900 | [diff] [blame] | 2197 | 	req->resid_len = blk_rq_bytes(req); | 
| FUJITA Tomonori | dbb66c4 | 2009-06-09 05:47:10 +0200 | [diff] [blame] | 2198 | 	if (unlikely(blk_bidi_rq(req))) | 
 | 2199 | 		req->next_rq->resid_len = blk_rq_bytes(req->next_rq); | 
 | 2200 |  | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2201 | 	blk_add_timer(req); | 
 | 2202 | } | 
 | 2203 | EXPORT_SYMBOL(blk_start_request); | 
 | 2204 |  | 
 | 2205 | /** | 
 | 2206 |  * blk_fetch_request - fetch a request from a request queue | 
 | 2207 |  * @q: request queue to fetch a request from | 
 | 2208 |  * | 
 | 2209 |  * Description: | 
 | 2210 |  *     Return the request at the top of @q.  The request is started on | 
 | 2211 |  *     return and LLD can start processing it immediately. | 
 | 2212 |  * | 
 | 2213 |  * Return: | 
 | 2214 |  *     Pointer to the request at the top of @q if available.  Null | 
 | 2215 |  *     otherwise. | 
 | 2216 |  * | 
 | 2217 |  * Context: | 
 | 2218 |  *     queue_lock must be held. | 
 | 2219 |  */ | 
 | 2220 | struct request *blk_fetch_request(struct request_queue *q) | 
 | 2221 | { | 
 | 2222 | 	struct request *rq; | 
 | 2223 |  | 
 | 2224 | 	rq = blk_peek_request(q); | 
 | 2225 | 	if (rq) | 
 | 2226 | 		blk_start_request(rq); | 
 | 2227 | 	return rq; | 
 | 2228 | } | 
 | 2229 | EXPORT_SYMBOL(blk_fetch_request); | 
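/*
 * A sketch of the simplest request_fn built on blk_fetch_request():
 * fetch, process, complete.  The queue lock is held here, hence
 * __blk_end_request_all(); the actual data transfer is elided.
 */
static void example_simple_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/* ... move blk_rq_bytes(rq) bytes at blk_rq_pos(rq) ... */
		__blk_end_request_all(rq, 0);
	}
}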
 | 2230 |  | 
 | 2231 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2232 |  * blk_update_request - Special helper function for request stacking drivers | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2233 |  * @req:      the request being processed | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2234 |  * @error:    %0 for success, < %0 for error | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2235 |  * @nr_bytes: number of bytes to complete @req | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2236 |  * | 
 | 2237 |  * Description: | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2238 |  *     Ends I/O on a number of bytes attached to @req, but doesn't complete | 
 | 2239 |  *     the request structure even if @req doesn't have leftover. | 
 | 2240 |  *     If @req has leftover, sets it up for the next range of segments. | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2241 |  * | 
 | 2242 |  *     This special helper function is only for request stacking drivers | 
 | 2243 |  *     (e.g. request-based dm) so that they can handle partial completion. | 
 | 2244 |  *     Actual device drivers should use blk_end_request instead. | 
 | 2245 |  * | 
 | 2246 |  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees | 
 | 2247 |  *     a %false return from this function. | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2248 |  * | 
 | 2249 |  * Return: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2250 |  *     %false - this request doesn't have any more data | 
 | 2251 |  *     %true  - this request has more data | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2252 |  **/ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2253 | bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | { | 
| Kiyoshi Ueda | 5450d3e | 2007-12-11 17:53:03 -0500 | [diff] [blame] | 2255 | 	int total_bytes, bio_nbytes, next_idx = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2256 | 	struct bio *bio; | 
 | 2257 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2258 | 	if (!req->bio) | 
 | 2259 | 		return false; | 
 | 2260 |  | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 2261 | 	trace_block_rq_complete(req->q, req); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 2262 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2263 | 	/* | 
| Tejun Heo | 6f41469 | 2009-04-19 07:00:41 +0900 | [diff] [blame] | 2264 | 	 * For fs requests, rq is just a carrier of independent bios | 
 | 2265 | 	 * and each partial completion should be handled separately. | 
 | 2266 | 	 * Reset per-request error on each partial completion. | 
 | 2267 | 	 * | 
 | 2268 | 	 * TODO: tj: This is too subtle.  It would be better to let | 
 | 2269 | 	 * low level drivers do what they see fit. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2270 | 	 */ | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2271 | 	if (req->cmd_type == REQ_TYPE_FS) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2272 | 		req->errors = 0; | 
 | 2273 |  | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2274 | 	if (error && req->cmd_type == REQ_TYPE_FS && | 
 | 2275 | 	    !(req->cmd_flags & REQ_QUIET)) { | 
| Hannes Reinecke | 7977556 | 2011-01-18 10:13:13 +0100 | [diff] [blame] | 2276 | 		char *error_type; | 
 | 2277 |  | 
 | 2278 | 		switch (error) { | 
 | 2279 | 		case -ENOLINK: | 
 | 2280 | 			error_type = "recoverable transport"; | 
 | 2281 | 			break; | 
 | 2282 | 		case -EREMOTEIO: | 
 | 2283 | 			error_type = "critical target"; | 
 | 2284 | 			break; | 
 | 2285 | 		case -EBADE: | 
 | 2286 | 			error_type = "critical nexus"; | 
 | 2287 | 			break; | 
 | 2288 | 		case -EIO: | 
 | 2289 | 		default: | 
 | 2290 | 			error_type = "I/O"; | 
 | 2291 | 			break; | 
 | 2292 | 		} | 
| Yi Zou | 37d7b34 | 2012-08-30 16:26:25 -0700 | [diff] [blame] | 2293 | 		printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", | 
 | 2294 | 				   error_type, req->rq_disk ? | 
 | 2295 | 				   req->rq_disk->disk_name : "?", | 
 | 2296 | 				   (unsigned long long)blk_rq_pos(req)); | 
 | 2297 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 | 	} | 
 | 2299 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2300 | 	blk_account_io_completion(req, nr_bytes); | 
| Jens Axboe | d72d904 | 2005-11-01 08:35:42 +0100 | [diff] [blame] | 2301 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2302 | 	total_bytes = bio_nbytes = 0; | 
 | 2303 | 	while ((bio = req->bio) != NULL) { | 
 | 2304 | 		int nbytes; | 
 | 2305 |  | 
 | 2306 | 		if (nr_bytes >= bio->bi_size) { | 
 | 2307 | 			req->bio = bio->bi_next; | 
 | 2308 | 			nbytes = bio->bi_size; | 
| NeilBrown | 5bb23a6 | 2007-09-27 12:46:13 +0200 | [diff] [blame] | 2309 | 			req_bio_endio(req, bio, nbytes, error); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2310 | 			next_idx = 0; | 
 | 2311 | 			bio_nbytes = 0; | 
 | 2312 | 		} else { | 
 | 2313 | 			int idx = bio->bi_idx + next_idx; | 
 | 2314 |  | 
| Kazuhisa Ichikawa | af498d7 | 2009-05-12 13:27:45 +0200 | [diff] [blame] | 2315 | 			if (unlikely(idx >= bio->bi_vcnt)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2316 | 				blk_dump_rq_flags(req, "__end_that"); | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 2317 | 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", | 
| Kazuhisa Ichikawa | af498d7 | 2009-05-12 13:27:45 +0200 | [diff] [blame] | 2318 | 				       __func__, idx, bio->bi_vcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2319 | 				break; | 
 | 2320 | 			} | 
 | 2321 |  | 
 | 2322 | 			nbytes = bio_iovec_idx(bio, idx)->bv_len; | 
 | 2323 | 			BIO_BUG_ON(nbytes > bio->bi_size); | 
 | 2324 |  | 
 | 2325 | 			/* | 
 | 2326 | 			 * not a complete bvec done | 
 | 2327 | 			 */ | 
 | 2328 | 			if (unlikely(nbytes > nr_bytes)) { | 
 | 2329 | 				bio_nbytes += nr_bytes; | 
 | 2330 | 				total_bytes += nr_bytes; | 
 | 2331 | 				break; | 
 | 2332 | 			} | 
 | 2333 |  | 
 | 2334 | 			/* | 
 | 2335 | 			 * advance to the next vector | 
 | 2336 | 			 */ | 
 | 2337 | 			next_idx++; | 
 | 2338 | 			bio_nbytes += nbytes; | 
 | 2339 | 		} | 
 | 2340 |  | 
 | 2341 | 		total_bytes += nbytes; | 
 | 2342 | 		nr_bytes -= nbytes; | 
 | 2343 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 2344 | 		bio = req->bio; | 
 | 2345 | 		if (bio) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2346 | 			/* | 
 | 2347 | 			 * end more in this run, or just return 'not-done' | 
 | 2348 | 			 */ | 
 | 2349 | 			if (unlikely(nr_bytes <= 0)) | 
 | 2350 | 				break; | 
 | 2351 | 		} | 
 | 2352 | 	} | 
 | 2353 |  | 
 | 2354 | 	/* | 
 | 2355 | 	 * completely done | 
 | 2356 | 	 */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2357 | 	if (!req->bio) { | 
 | 2358 | 		/* | 
 | 2359 | 		 * Reset counters so that the request stacking driver | 
 | 2360 | 		 * can find how many bytes remain in the request | 
 | 2361 | 		 * later. | 
 | 2362 | 		 */ | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2363 | 		req->__data_len = 0; | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2364 | 		return false; | 
 | 2365 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2366 |  | 
 | 2367 | 	/* | 
 | 2368 | 	 * if the request wasn't completed, update state | 
 | 2369 | 	 */ | 
 | 2370 | 	if (bio_nbytes) { | 
| NeilBrown | 5bb23a6 | 2007-09-27 12:46:13 +0200 | [diff] [blame] | 2371 | 		req_bio_endio(req, bio, bio_nbytes, error); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2372 | 		bio->bi_idx += next_idx; | 
 | 2373 | 		bio_iovec(bio)->bv_offset += nr_bytes; | 
 | 2374 | 		bio_iovec(bio)->bv_len -= nr_bytes; | 
 | 2375 | 	} | 
 | 2376 |  | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2377 | 	req->__data_len -= total_bytes; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2378 | 	req->buffer = bio_data(req->bio); | 
 | 2379 |  | 
 | 2380 | 	/* update sector only for requests with clear definition of sector */ | 
| Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 2381 | 	if (req->cmd_type == REQ_TYPE_FS) | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2382 | 		req->__sector += total_bytes >> 9; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2383 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2384 | 	/* mixed attributes always follow the first bio */ | 
 | 2385 | 	if (req->cmd_flags & REQ_MIXED_MERGE) { | 
 | 2386 | 		req->cmd_flags &= ~REQ_FAILFAST_MASK; | 
 | 2387 | 		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; | 
 | 2388 | 	} | 
 | 2389 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2390 | 	/* | 
 | 2391 | 	 * If the total number of sectors is less than the first segment | 
 | 2392 | 	 * size, something has gone terribly wrong. | 
 | 2393 | 	 */ | 
 | 2394 | 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { | 
| Jens Axboe | 8182924 | 2011-03-30 09:51:33 +0200 | [diff] [blame] | 2395 | 		blk_dump_rq_flags(req, "request botched"); | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2396 | 		req->__data_len = blk_rq_cur_bytes(req); | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2397 | 	} | 
 | 2398 |  | 
 | 2399 | 	/* recalculate the number of segments */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2400 | 	blk_recalc_rq_segments(req); | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2401 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2402 | 	return true; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2403 | } | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2404 | EXPORT_SYMBOL_GPL(blk_update_request); | 
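/*
 * A sketch of partial completion as a request stacking driver might
 * do it: push each completed chunk of the original request through
 * blk_update_request(), and finish the request structure only once no
 * data is left.  blk_end_request_all() takes the queue lock itself.
 */
static void example_complete_chunk(struct request *orig, int error,
				   unsigned int bytes)
{
	if (blk_update_request(orig, error, bytes))
		return;		/* @orig still has data pending */

	blk_end_request_all(orig, error);
}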
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2405 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2406 | static bool blk_update_bidi_request(struct request *rq, int error, | 
 | 2407 | 				    unsigned int nr_bytes, | 
 | 2408 | 				    unsigned int bidi_bytes) | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2409 | { | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2410 | 	if (blk_update_request(rq, error, nr_bytes)) | 
 | 2411 | 		return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2412 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2413 | 	/* Bidi request must be completed as a whole */ | 
 | 2414 | 	if (unlikely(blk_bidi_rq(rq)) && | 
 | 2415 | 	    blk_update_request(rq->next_rq, error, bidi_bytes)) | 
 | 2416 | 		return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2417 |  | 
| Jens Axboe | e2e1a14 | 2010-06-09 10:42:09 +0200 | [diff] [blame] | 2418 | 	if (blk_queue_add_random(rq->q)) | 
 | 2419 | 		add_disk_randomness(rq->rq_disk); | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2420 |  | 
 | 2421 | 	return false; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2422 | } | 
 | 2423 |  | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 2424 | /** | 
 | 2425 |  * blk_unprep_request - unprepare a request | 
 | 2426 |  * @req:	the request | 
 | 2427 |  * | 
 | 2428 |  * This function makes a request ready for complete resubmission (or | 
 | 2429 |  * completion).  It happens only after all error handling is complete, | 
 | 2430 |  * so represents the appropriate moment to deallocate any resources | 
 | 2431 |  * that were allocated to the request in the prep_rq_fn.  The queue | 
 | 2432 |  * lock is held when calling this. | 
 | 2433 |  */ | 
 | 2434 | void blk_unprep_request(struct request *req) | 
 | 2435 | { | 
 | 2436 | 	struct request_queue *q = req->q; | 
 | 2437 |  | 
 | 2438 | 	req->cmd_flags &= ~REQ_DONTPREP; | 
 | 2439 | 	if (q->unprep_rq_fn) | 
 | 2440 | 		q->unprep_rq_fn(q, req); | 
 | 2441 | } | 
 | 2442 | EXPORT_SYMBOL_GPL(blk_unprep_request); | 
 | 2443 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2444 | /* | 
 | 2445 |  * queue lock must be held | 
 | 2446 |  */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2447 | static void blk_finish_request(struct request *req, int error) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2448 | { | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2449 | 	if (blk_rq_tagged(req)) | 
 | 2450 | 		blk_queue_end_tag(req->q, req); | 
 | 2451 |  | 
| James Bottomley | ba396a6 | 2009-05-27 14:17:08 +0200 | [diff] [blame] | 2452 | 	BUG_ON(blk_queued_rq(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2453 |  | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2454 | 	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 2455 | 		laptop_io_completion(&req->q->backing_dev_info); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2456 |  | 
| Mike Anderson | e78042e | 2008-10-30 02:16:20 -0700 | [diff] [blame] | 2457 | 	blk_delete_timer(req); | 
 | 2458 |  | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 2459 | 	if (req->cmd_flags & REQ_DONTPREP) | 
 | 2460 | 		blk_unprep_request(req); | 
 | 2461 |  | 
 | 2462 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2463 | 	blk_account_io_done(req); | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2464 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2465 | 	if (req->end_io) | 
| Tejun Heo | 8ffdc65 | 2006-01-06 09:49:03 +0100 | [diff] [blame] | 2466 | 		req->end_io(req, error); | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2467 | 	else { | 
 | 2468 | 		if (blk_bidi_rq(req)) | 
 | 2469 | 			__blk_put_request(req->next_rq->q, req->next_rq); | 
 | 2470 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2471 | 		__blk_put_request(req->q, req); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2472 | 	} | 
 | 2473 | } | 
 | 2474 |  | 
| Kiyoshi Ueda | 3b11313 | 2007-12-11 17:41:17 -0500 | [diff] [blame] | 2475 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2476 |  * blk_end_bidi_request - Complete a bidi request | 
 | 2477 |  * @rq:         the request to complete | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 2478 |  * @error:      %0 for success, < %0 for error | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2479 |  * @nr_bytes:   number of bytes to complete @rq | 
 | 2480 |  * @bidi_bytes: number of bytes to complete @rq->next_rq | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2481 |  * | 
 | 2482 |  * Description: | 
 | 2483 |  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2484 |  *     Drivers that support bidi can safely call this function for any | 
 | 2485 |  *     type of request, bidi or uni.  In the latter case @bidi_bytes is | 
 | 2486 |  *     just ignored. | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2487 |  * | 
 | 2488 |  * Return: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2489 |  *     %false - we are done with this request | 
 | 2490 |  *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2491 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2492 | static bool blk_end_bidi_request(struct request *rq, int error, | 
 | 2493 | 				 unsigned int nr_bytes, unsigned int bidi_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2494 | { | 
 | 2495 | 	struct request_queue *q = rq->q; | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2496 | 	unsigned long flags; | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2497 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2498 | 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 
 | 2499 | 		return true; | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2500 |  | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2501 | 	spin_lock_irqsave(q->queue_lock, flags); | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2502 | 	blk_finish_request(rq, error); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2503 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 2504 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2505 | 	return false; | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2506 | } | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2507 |  | 
 | 2508 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2509 |  * __blk_end_bidi_request - Complete a bidi request with queue lock held | 
 | 2510 |  * @rq:         the request to complete | 
 | 2511 |  * @error:      %0 for success, < %0 for error | 
 | 2512 |  * @nr_bytes:   number of bytes to complete @rq | 
 | 2513 |  * @bidi_bytes: number of bytes to complete @rq->next_rq | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2514 |  * | 
 | 2515 |  * Description: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2516 |  *     Identical to blk_end_bidi_request() except that queue lock is | 
 | 2517 |  *     assumed to be locked on entry and remains so on return. | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2518 |  * | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2519 |  * Return: | 
 | 2520 |  *     %false - we are done with this request | 
 | 2521 |  *     %true  - still buffers pending for this request | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2522 |  **/ | 
| Jeff Moyer | 4853aba | 2011-08-15 21:37:25 +0200 | [diff] [blame] | 2523 | bool __blk_end_bidi_request(struct request *rq, int error, | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2524 | 				   unsigned int nr_bytes, unsigned int bidi_bytes) | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2525 | { | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2526 | 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 
 | 2527 | 		return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2528 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2529 | 	blk_finish_request(rq, error); | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2530 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2531 | 	return false; | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2532 | } | 
 | 2533 |  | 
 | 2534 | /** | 
 | 2535 |  * blk_end_request - Helper function for drivers to complete the request. | 
 | 2536 |  * @rq:       the request being processed | 
 | 2537 |  * @error:    %0 for success, < %0 for error | 
 | 2538 |  * @nr_bytes: number of bytes to complete | 
 | 2539 |  * | 
 | 2540 |  * Description: | 
 | 2541 |  *     Ends I/O on a number of bytes attached to @rq. | 
 | 2542 |  *     If @rq has leftover, sets it up for the next range of segments. | 
 | 2543 |  * | 
 | 2544 |  * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2545 |  *     %false - we are done with this request | 
 | 2546 |  *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2547 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2548 | bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2549 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2550 | 	return blk_end_bidi_request(rq, error, nr_bytes, 0); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2551 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2552 | EXPORT_SYMBOL(blk_end_request); | 
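/*
 * A sketch of chunk-at-a-time completion from a driver's interrupt
 * handler: blk_end_request() takes the queue lock itself, so it may
 * be called without holding it.  @hw_bytes_done stands in for a byte
 * count reported by the hardware.
 */
static void example_irq_complete(struct request *rq, int error,
				 unsigned int hw_bytes_done)
{
	if (blk_end_request(rq, error, hw_bytes_done))
		return;	/* buffers still pending; wait for the next IRQ */
	/* the request was fully completed and released above */
}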
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2553 |  | 
 | 2554 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2555 |  * blk_end_request_all - Helper function for drivers to finish the request. | 
 | 2556 |  * @rq: the request to finish | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2557 |  * @error: %0 for success, < %0 for error | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2558 |  * | 
 | 2559 |  * Description: | 
 | 2560 |  *     Completely finish @rq. | 
 | 2561 |  */ | 
 | 2562 | void blk_end_request_all(struct request *rq, int error) | 
 | 2563 | { | 
 | 2564 | 	bool pending; | 
 | 2565 | 	unsigned int bidi_bytes = 0; | 
 | 2566 |  | 
 | 2567 | 	if (unlikely(blk_bidi_rq(rq))) | 
 | 2568 | 		bidi_bytes = blk_rq_bytes(rq->next_rq); | 
 | 2569 |  | 
 | 2570 | 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 
 | 2571 | 	BUG_ON(pending); | 
 | 2572 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2573 | EXPORT_SYMBOL(blk_end_request_all); | 
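/*
 * Example (editorial sketch): failing a whole request in one call, e.g.
 * on device removal.  Unlike __blk_end_request_all(), this variant takes
 * the queue lock internally, so the caller must not hold it.
 */
static void my_fail_request(struct request *rq)
{
	blk_end_request_all(rq, -EIO);
}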
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2574 |  | 
 | 2575 | /** | 
 | 2576 |  * blk_end_request_cur - Helper function to finish the current request chunk. | 
 | 2577 |  * @rq: the request to finish the current chunk for | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2578 |  * @error: %0 for success, < %0 for error | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2579 |  * | 
 | 2580 |  * Description: | 
 | 2581 |  *     Complete the current consecutively mapped chunk from @rq. | 
 | 2582 |  * | 
 | 2583 |  * Return: | 
 | 2584 |  *     %false - we are done with this request | 
 | 2585 |  *     %true  - still buffers pending for this request | 
 | 2586 |  */ | 
 | 2587 | bool blk_end_request_cur(struct request *rq, int error) | 
 | 2588 | { | 
 | 2589 | 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 
 | 2590 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2591 | EXPORT_SYMBOL(blk_end_request_cur); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2592 |  | 
 | 2593 | /** | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2594 |  * blk_end_request_err - Finish a request up to the next failure boundary. | 
 | 2595 |  * @rq: the request to finish up to the next failure boundary for | 
 | 2596 |  * @error: must be negative errno | 
 | 2597 |  * | 
 | 2598 |  * Description: | 
 | 2599 |  *     Complete @rq up to the next failure boundary. | 
 | 2600 |  * | 
 | 2601 |  * Return: | 
 | 2602 |  *     %false - we are done with this request | 
 | 2603 |  *     %true  - still buffers pending for this request | 
 | 2604 |  */ | 
 | 2605 | bool blk_end_request_err(struct request *rq, int error) | 
 | 2606 | { | 
 | 2607 | 	WARN_ON(error >= 0); | 
 | 2608 | 	return blk_end_request(rq, error, blk_rq_err_bytes(rq)); | 
 | 2609 | } | 
 | 2610 | EXPORT_SYMBOL_GPL(blk_end_request_err); | 
 | 2611 |  | 
 | 2612 | /** | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2613 |  * __blk_end_request - Helper function for drivers to complete the request. | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2614 |  * @rq:       the request being processed | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2615 |  * @error:    %0 for success, < %0 for error | 
 | 2616 |  * @nr_bytes: number of bytes to complete | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2617 |  * | 
 | 2618 |  * Description: | 
 | 2619 |  *     Must be called with the queue lock held, unlike blk_end_request(). | 
 | 2620 |  * | 
 | 2621 |  * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2622 |  *     %false - we are done with this request | 
 | 2623 |  *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2624 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2625 | bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2626 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2627 | 	return __blk_end_bidi_request(rq, error, nr_bytes, 0); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2628 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2629 | EXPORT_SYMBOL(__blk_end_request); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2630 |  | 
 | 2631 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2632 |  * __blk_end_request_all - Helper function for drivers to finish the request. | 
 | 2633 |  * @rq: the request to finish | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2634 |  * @error: %0 for success, < %0 for error | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2635 |  * | 
 | 2636 |  * Description: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2637 |  *     Completely finish @rq.  Must be called with the queue lock held. | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2638 |  */ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2639 | void __blk_end_request_all(struct request *rq, int error) | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2640 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2641 | 	bool pending; | 
 | 2642 | 	unsigned int bidi_bytes = 0; | 
 | 2643 |  | 
 | 2644 | 	if (unlikely(blk_bidi_rq(rq))) | 
 | 2645 | 		bidi_bytes = blk_rq_bytes(rq->next_rq); | 
 | 2646 |  | 
 | 2647 | 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 
 | 2648 | 	BUG_ON(pending); | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2649 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2650 | EXPORT_SYMBOL(__blk_end_request_all); | 
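/*
 * Example (editorial sketch): completing requests from a context that
 * already holds the queue lock, e.g. when draining a queue on teardown.
 * blk_fetch_request() also requires the lock, so the pairing is natural.
 */
static void my_drain_queue(struct request_queue *q)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	while ((rq = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(rq, -ENODEV);
	spin_unlock_irqrestore(q->queue_lock, flags);
}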
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2651 |  | 
 | 2652 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2653 |  * __blk_end_request_cur - Helper function to finish the current request chunk. | 
 | 2654 |  * @rq: the request to finish the current chunk for | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2655 |  * @error: %0 for success, < %0 for error | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2656 |  * | 
 | 2657 |  * Description: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2658 |  *     Complete the current consecutively mapped chunk from @rq.  Must | 
 | 2659 |  *     be called with the queue lock held. | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2660 |  * | 
 | 2661 |  * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2662 |  *     %false - we are done with this request | 
 | 2663 |  *     %true  - still buffers pending for this request | 
 | 2664 |  */ | 
 | 2665 | bool __blk_end_request_cur(struct request *rq, int error) | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2666 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2667 | 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2668 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2669 | EXPORT_SYMBOL(__blk_end_request_cur); | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2670 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2671 | /** | 
 | 2672 |  * __blk_end_request_err - Finish a request up to the next failure boundary. | 
 | 2673 |  * @rq: the request to finish up to the next failure boundary for | 
 | 2674 |  * @error: must be negative errno | 
 | 2675 |  * | 
 | 2676 |  * Description: | 
 | 2677 |  *     Complete @rq up to the next failure boundary.  Must be called | 
 | 2678 |  *     with the queue lock held. | 
 | 2679 |  * | 
 | 2680 |  * Return: | 
 | 2681 |  *     %false - we are done with this request | 
 | 2682 |  *     %true  - still buffers pending for this request | 
 | 2683 |  */ | 
 | 2684 | bool __blk_end_request_err(struct request *rq, int error) | 
 | 2685 | { | 
 | 2686 | 	WARN_ON(error >= 0); | 
 | 2687 | 	return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); | 
 | 2688 | } | 
 | 2689 | EXPORT_SYMBOL_GPL(__blk_end_request_err); | 
 | 2690 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 2691 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 
 | 2692 | 		     struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2693 | { | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 2694 | 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 2695 | 	rq->cmd_flags |= bio->bi_rw & REQ_WRITE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2696 |  | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 2697 | 	if (bio_has_data(bio)) { | 
 | 2698 | 		rq->nr_phys_segments = bio_phys_segments(q, bio); | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 2699 | 		rq->buffer = bio_data(bio); | 
 | 2700 | 	} | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2701 | 	rq->__data_len = bio->bi_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2702 | 	rq->bio = rq->biotail = bio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2703 |  | 
| NeilBrown | 6684657 | 2007-08-16 13:31:28 +0200 | [diff] [blame] | 2704 | 	if (bio->bi_bdev) | 
 | 2705 | 		rq->rq_disk = bio->bi_bdev->bd_disk; | 
 | 2706 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2707 |  | 
| Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 2708 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 
 | 2709 | /** | 
 | 2710 |  * rq_flush_dcache_pages - Helper function to flush all pages in a request | 
 | 2711 |  * @rq: the request to be flushed | 
 | 2712 |  * | 
 | 2713 |  * Description: | 
 | 2714 |  *     Flush all pages in @rq. | 
 | 2715 |  */ | 
 | 2716 | void rq_flush_dcache_pages(struct request *rq) | 
 | 2717 | { | 
 | 2718 | 	struct req_iterator iter; | 
 | 2719 | 	struct bio_vec *bvec; | 
 | 2720 |  | 
 | 2721 | 	rq_for_each_segment(bvec, rq, iter) | 
 | 2722 | 		flush_dcache_page(bvec->bv_page); | 
 | 2723 | } | 
 | 2724 | EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); | 
 | 2725 | #endif | 
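/*
 * Example (editorial sketch): a PIO-style driver that wrote into the
 * pages backing @rq with the CPU should flush the D-cache before
 * completing, so user mappings see the new data on architectures that
 * need it (elsewhere rq_flush_dcache_pages() is a no-op stub).
 */
static void my_pio_read_done(struct request *rq)
{
	rq_flush_dcache_pages(rq);
	blk_end_request_all(rq, 0);
}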
 | 2726 |  | 
| Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 2727 | /** | 
 | 2728 |  * blk_lld_busy - Check if underlying low-level drivers of a device are busy | 
 | 2729 |  * @q : the queue of the device being checked | 
 | 2730 |  * | 
 | 2731 |  * Description: | 
 | 2732 |  *    Check if underlying low-level drivers of a device are busy. | 
 | 2733 |  *    If the drivers want to export their busy state, they must first | 
 | 2734 |  *    set their own exporting function using blk_queue_lld_busy(). | 
 | 2735 |  * | 
 | 2736 |  *    Basically, this function is used only by request stacking drivers | 
 | 2737 |  *    to stop dispatching requests to underlying devices when those | 
 | 2738 |  *    devices are busy.  This behavior allows more I/O merging on the queue | 
 | 2739 |  *    of the request stacking driver and prevents I/O throughput regressions | 
 | 2740 |  *    under bursty I/O load. | 
 | 2741 |  * | 
 | 2742 |  * Return: | 
 | 2743 |  *    0 - Not busy (the request stacking driver should dispatch requests) | 
 | 2744 |  *    1 - Busy (the request stacking driver should stop dispatching requests) | 
 | 2745 |  */ | 
 | 2746 | int blk_lld_busy(struct request_queue *q) | 
 | 2747 | { | 
 | 2748 | 	if (q->lld_busy_fn) | 
 | 2749 | 		return q->lld_busy_fn(q); | 
 | 2750 |  | 
 | 2751 | 	return 0; | 
 | 2752 | } | 
 | 2753 | EXPORT_SYMBOL_GPL(blk_lld_busy); | 
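/*
 * Example (editorial sketch): a low-level driver exporting its busy
 * state for stacking drivers.  "struct my_dev" and its inflight counter
 * are hypothetical; only blk_queue_lld_busy() and blk_lld_busy() are
 * real interfaces here.
 */
struct my_dev {
	atomic_t inflight;
	int depth;
};

static int my_lld_busy_fn(struct request_queue *q)
{
	struct my_dev *dev = q->queuedata;

	return atomic_read(&dev->inflight) >= dev->depth;
}

/* registered once at probe time:  blk_queue_lld_busy(q, my_lld_busy_fn); */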
 | 2754 |  | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2755 | /** | 
 | 2756 |  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request | 
 | 2757 |  * @rq: the clone request to be cleaned up | 
 | 2758 |  * | 
 | 2759 |  * Description: | 
 | 2760 |  *     Free all bios in @rq for a cloned request. | 
 | 2761 |  */ | 
 | 2762 | void blk_rq_unprep_clone(struct request *rq) | 
 | 2763 | { | 
 | 2764 | 	struct bio *bio; | 
 | 2765 |  | 
 | 2766 | 	while ((bio = rq->bio) != NULL) { | 
 | 2767 | 		rq->bio = bio->bi_next; | 
 | 2768 |  | 
 | 2769 | 		bio_put(bio); | 
 | 2770 | 	} | 
 | 2771 | } | 
 | 2772 | EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); | 
 | 2773 |  | 
 | 2774 | /* | 
 | 2775 |  * Copy attributes of the original request to the clone request. | 
 | 2776 |  * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. | 
 | 2777 |  */ | 
 | 2778 | static void __blk_rq_prep_clone(struct request *dst, struct request *src) | 
 | 2779 | { | 
 | 2780 | 	dst->cpu = src->cpu; | 
| Tejun Heo | 3a2edd0 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 2781 | 	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2782 | 	dst->cmd_type = src->cmd_type; | 
 | 2783 | 	dst->__sector = blk_rq_pos(src); | 
 | 2784 | 	dst->__data_len = blk_rq_bytes(src); | 
 | 2785 | 	dst->nr_phys_segments = src->nr_phys_segments; | 
 | 2786 | 	dst->ioprio = src->ioprio; | 
 | 2787 | 	dst->extra_len = src->extra_len; | 
 | 2788 | } | 
 | 2789 |  | 
 | 2790 | /** | 
 | 2791 |  * blk_rq_prep_clone - Helper function to setup clone request | 
 | 2792 |  * @rq: the request to be setup | 
 | 2793 |  * @rq_src: original request to be cloned | 
 | 2794 |  * @bs: bio_set that bios for clone are allocated from | 
 | 2795 |  * @gfp_mask: memory allocation mask for bio | 
 | 2796 |  * @bio_ctr: setup function to be called for each clone bio. | 
 | 2797 |  *           Returns %0 for success, non %0 for failure. | 
 | 2798 |  * @data: private data to be passed to @bio_ctr | 
 | 2799 |  * | 
 | 2800 |  * Description: | 
 | 2801 |  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. | 
 | 2802 |  *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) | 
 | 2803 |  *     are not copied, and copying such parts is the caller's responsibility. | 
 | 2804 |  *     Also, the pages which the original bios point to are not copied; | 
 | 2805 |  *     the cloned bios simply point at the same pages. | 
 | 2806 |  *     The cloned bios must therefore be completed before the original bios, | 
 | 2807 |  *     which means the caller must complete @rq before @rq_src. | 
 | 2808 |  */ | 
 | 2809 | int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 
 | 2810 | 		      struct bio_set *bs, gfp_t gfp_mask, | 
 | 2811 | 		      int (*bio_ctr)(struct bio *, struct bio *, void *), | 
 | 2812 | 		      void *data) | 
 | 2813 | { | 
 | 2814 | 	struct bio *bio, *bio_src; | 
 | 2815 |  | 
 | 2816 | 	if (!bs) | 
 | 2817 | 		bs = fs_bio_set; | 
 | 2818 |  | 
 | 2819 | 	blk_rq_init(NULL, rq); | 
 | 2820 |  | 
 | 2821 | 	__rq_for_each_bio(bio_src, rq_src) { | 
| Kent Overstreet | bf800ef | 2012-09-06 15:35:02 -0700 | [diff] [blame] | 2822 | 		bio = bio_clone_bioset(bio_src, gfp_mask, bs); | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2823 | 		if (!bio) | 
 | 2824 | 			goto free_and_out; | 
 | 2825 |  | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2826 | 		if (bio_ctr && bio_ctr(bio, bio_src, data)) | 
 | 2827 | 			goto free_and_out; | 
 | 2828 |  | 
 | 2829 | 		if (rq->bio) { | 
 | 2830 | 			rq->biotail->bi_next = bio; | 
 | 2831 | 			rq->biotail = bio; | 
 | 2832 | 		} else | 
 | 2833 | 			rq->bio = rq->biotail = bio; | 
 | 2834 | 	} | 
 | 2835 |  | 
 | 2836 | 	__blk_rq_prep_clone(rq, rq_src); | 
 | 2837 |  | 
 | 2838 | 	return 0; | 
 | 2839 |  | 
 | 2840 | free_and_out: | 
 | 2841 | 	if (bio) | 
| Kent Overstreet | 4254bba | 2012-09-06 15:35:00 -0700 | [diff] [blame] | 2842 | 		bio_put(bio); | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2843 | 	blk_rq_unprep_clone(rq); | 
 | 2844 |  | 
 | 2845 | 	return -ENOMEM; | 
 | 2846 | } | 
 | 2847 | EXPORT_SYMBOL_GPL(blk_rq_prep_clone); | 
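/*
 * Example (editorial sketch, patterned after request-based stacking
 * drivers such as dm): prepare @clone from @rq_src and hook its
 * completion.  "my_end_clone_request" is a hypothetical rq_end_io_fn.
 */
static void my_end_clone_request(struct request *clone, int error);

static int my_setup_clone(struct request *clone, struct request *rq_src,
			  gfp_t gfp_mask)
{
	int r = blk_rq_prep_clone(clone, rq_src, NULL, gfp_mask, NULL, NULL);

	if (r)
		return r;

	clone->end_io = my_end_clone_request;
	clone->end_io_data = rq_src;
	return 0;
}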
 | 2848 |  | 
| Jens Axboe | 18887ad | 2008-07-28 13:08:45 +0200 | [diff] [blame] | 2849 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2850 | { | 
 | 2851 | 	return queue_work(kblockd_workqueue, work); | 
 | 2852 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2853 | EXPORT_SYMBOL(kblockd_schedule_work); | 
 | 2854 |  | 
| Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 2855 | int kblockd_schedule_delayed_work(struct request_queue *q, | 
 | 2856 | 			struct delayed_work *dwork, unsigned long delay) | 
 | 2857 | { | 
 | 2858 | 	return queue_delayed_work(kblockd_workqueue, dwork, delay); | 
 | 2859 | } | 
 | 2860 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | 
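/*
 * Example (editorial sketch): deferring queue processing to kblockd,
 * the same pattern blk_run_queue_async() relies on.  A real driver
 * would embed the work item in its own state; the static here is for
 * brevity only.
 */
static void my_requeue_fn(struct work_struct *work)
{
	/* runs in kblockd process context; may take the queue lock */
}

static DECLARE_WORK(my_requeue_work, my_requeue_fn);

static void my_defer_requeue(struct request_queue *q)
{
	kblockd_schedule_work(q, &my_requeue_work);
}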
 | 2861 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2862 | #define PLUG_MAGIC	0x91827364 | 
 | 2863 |  | 
| Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 2864 | /** | 
 | 2865 |  * blk_start_plug - initialize blk_plug and track it inside the task_struct | 
 | 2866 |  * @plug:	The &struct blk_plug that needs to be initialized | 
 | 2867 |  * | 
 | 2868 |  * Description: | 
 | 2869 |  *   Tracking blk_plug inside the task_struct will help with auto-flushing the | 
 | 2870 |  *   pending I/O should the task end up blocking between blk_start_plug() and | 
 | 2871 |  *   blk_finish_plug(). This is important from a performance perspective, but | 
 | 2872 |  *   also ensures that we don't deadlock. For instance, if the task is blocking | 
 | 2873 |  *   on a memory allocation, memory reclaim could end up wanting to free a | 
 | 2874 |  *   page belonging to a request currently residing in our private plug. By | 
 | 2875 |  *   flushing the pending I/O when the process goes to sleep, we avoid this | 
 | 2876 |  *   kind of deadlock. | 
 | 2877 |  */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2878 | void blk_start_plug(struct blk_plug *plug) | 
 | 2879 | { | 
 | 2880 | 	struct task_struct *tsk = current; | 
 | 2881 |  | 
 | 2882 | 	plug->magic = PLUG_MAGIC; | 
 | 2883 | 	INIT_LIST_HEAD(&plug->list); | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2884 | 	INIT_LIST_HEAD(&plug->cb_list); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2885 |  | 
 | 2886 | 	/* | 
 | 2887 | 	 * If this is a nested plug, don't actually assign it. It will be | 
 | 2888 | 	 * flushed on its own. | 
 | 2889 | 	 */ | 
 | 2890 | 	if (!tsk->plug) { | 
 | 2891 | 		/* | 
 | 2892 | 		 * Store ordering should not be needed here, since a potential | 
 | 2893 | 		 * preempt will imply a full memory barrier | 
 | 2894 | 		 */ | 
 | 2895 | 		tsk->plug = plug; | 
 | 2896 | 	} | 
 | 2897 | } | 
 | 2898 | EXPORT_SYMBOL(blk_start_plug); | 
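/*
 * Example (editorial sketch): the canonical on-stack plugging pattern.
 * Bios submitted between blk_start_plug() and blk_finish_plug() collect
 * in the task's plug list, giving the block layer a chance to sort and
 * merge them before they reach the driver.
 */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);
	blk_finish_plug(&plug);
}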
 | 2899 |  | 
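/*
 * list_sort() comparator for the plug list: order requests first by
 * queue, then by start sector, so requests for the same queue are
 * dispatched together in roughly ascending LBA order.  Returns 0 when
 * @a should sort before @b and 1 otherwise.
 */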
 | 2900 | static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) | 
 | 2901 | { | 
 | 2902 | 	struct request *rqa = container_of(a, struct request, queuelist); | 
 | 2903 | 	struct request *rqb = container_of(b, struct request, queuelist); | 
 | 2904 |  | 
| Jianpeng Ma | 975927b | 2012-10-25 21:58:17 +0200 | [diff] [blame] | 2905 | 	return !(rqa->q < rqb->q || | 
 | 2906 | 		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2907 | } | 
 | 2908 |  | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2909 | /* | 
 | 2910 |  * If 'from_schedule' is true, then postpone the dispatch of requests | 
 | 2911 |  * until a safe kblockd context. We do this to avoid accidental big | 
 | 2912 |  * additional stack usage in driver dispatch, in places where the original | 
 | 2913 |  * plugger did not intend it. | 
 | 2914 |  */ | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 2915 | static void queue_unplugged(struct request_queue *q, unsigned int depth, | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2916 | 			    bool from_schedule) | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2917 | 	__releases(q->queue_lock) | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2918 | { | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2919 | 	trace_block_unplug(q, depth, !from_schedule); | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2920 |  | 
| Bart Van Assche | 7046057 | 2012-11-28 13:45:56 +0100 | [diff] [blame] | 2921 | 	if (from_schedule) | 
| Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 2922 | 		blk_run_queue_async(q); | 
| Bart Van Assche | 7046057 | 2012-11-28 13:45:56 +0100 | [diff] [blame] | 2923 | 	else | 
| Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 2924 | 		__blk_run_queue(q); | 
| Bart Van Assche | 7046057 | 2012-11-28 13:45:56 +0100 | [diff] [blame] | 2925 | 	spin_unlock(q->queue_lock); | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2926 | } | 
 | 2927 |  | 
| NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2928 | static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2929 | { | 
 | 2930 | 	LIST_HEAD(callbacks); | 
 | 2931 |  | 
| Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2932 | 	while (!list_empty(&plug->cb_list)) { | 
 | 2933 | 		list_splice_init(&plug->cb_list, &callbacks); | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2934 |  | 
| Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2935 | 		while (!list_empty(&callbacks)) { | 
 | 2936 | 			struct blk_plug_cb *cb = list_first_entry(&callbacks, | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2937 | 							  struct blk_plug_cb, | 
 | 2938 | 							  list); | 
| Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2939 | 			list_del(&cb->list); | 
| NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2940 | 			cb->callback(cb, from_schedule); | 
| Shaohua Li | 2a7d555 | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2941 | 		} | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2942 | 	} | 
 | 2943 | } | 
 | 2944 |  | 
| NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 2945 | struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, | 
 | 2946 | 				      int size) | 
 | 2947 | { | 
 | 2948 | 	struct blk_plug *plug = current->plug; | 
 | 2949 | 	struct blk_plug_cb *cb; | 
 | 2950 |  | 
 | 2951 | 	if (!plug) | 
 | 2952 | 		return NULL; | 
 | 2953 |  | 
 | 2954 | 	list_for_each_entry(cb, &plug->cb_list, list) | 
 | 2955 | 		if (cb->callback == unplug && cb->data == data) | 
 | 2956 | 			return cb; | 
 | 2957 |  | 
 | 2958 | 	/* Not currently on the callback list */ | 
 | 2959 | 	BUG_ON(size < sizeof(*cb)); | 
 | 2960 | 	cb = kzalloc(size, GFP_ATOMIC); | 
 | 2961 | 	if (cb) { | 
 | 2962 | 		cb->data = data; | 
 | 2963 | 		cb->callback = unplug; | 
 | 2964 | 		list_add(&cb->list, &plug->cb_list); | 
 | 2965 | 	} | 
 | 2966 | 	return cb; | 
 | 2967 | } | 
 | 2968 | EXPORT_SYMBOL(blk_check_plugged); | 
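/*
 * Example (editorial sketch, modelled on md's use of this helper): a
 * driver batches work for as long as the task keeps plugging.  The
 * struct blk_plug_cb must sit at the start of the containing struct,
 * since blk_check_plugged() allocates the whole object itself.
 */
struct my_plug_cb {
	struct blk_plug_cb cb;		/* must be first */
	/* driver-private batching state goes here */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	/* flush the accumulated batch, then free: the callback owns @cb */
	kfree(mcb);
}

static bool my_check_plugged(void *dev)
{
	return blk_check_plugged(my_unplug, dev,
				 sizeof(struct my_plug_cb)) != NULL;
}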
 | 2969 |  | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2970 | void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2971 | { | 
 | 2972 | 	struct request_queue *q; | 
 | 2973 | 	unsigned long flags; | 
 | 2974 | 	struct request *rq; | 
| NeilBrown | 109b812 | 2011-04-11 14:13:10 +0200 | [diff] [blame] | 2975 | 	LIST_HEAD(list); | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2976 | 	unsigned int depth; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2977 |  | 
 | 2978 | 	BUG_ON(plug->magic != PLUG_MAGIC); | 
 | 2979 |  | 
| NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 2980 | 	flush_plug_callbacks(plug, from_schedule); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2981 | 	if (list_empty(&plug->list)) | 
 | 2982 | 		return; | 
 | 2983 |  | 
| NeilBrown | 109b812 | 2011-04-11 14:13:10 +0200 | [diff] [blame] | 2984 | 	list_splice_init(&plug->list, &list); | 
 | 2985 |  | 
| Jianpeng Ma | 422765c | 2013-01-11 14:46:09 +0100 | [diff] [blame] | 2986 | 	list_sort(NULL, &list, plug_rq_cmp); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2987 |  | 
 | 2988 | 	q = NULL; | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2989 | 	depth = 0; | 
| Jens Axboe | 1881127 | 2011-04-12 10:11:24 +0200 | [diff] [blame] | 2990 |  | 
 | 2991 | 	/* | 
 | 2992 | 	 * Save and disable interrupts here, to avoid doing it for every | 
 | 2993 | 	 * queue lock we have to take. | 
 | 2994 | 	 */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2995 | 	local_irq_save(flags); | 
| NeilBrown | 109b812 | 2011-04-11 14:13:10 +0200 | [diff] [blame] | 2996 | 	while (!list_empty(&list)) { | 
 | 2997 | 		rq = list_entry_rq(list.next); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2998 | 		list_del_init(&rq->queuelist); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2999 | 		BUG_ON(!rq->q); | 
 | 3000 | 		if (rq->q != q) { | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 3001 | 			/* | 
 | 3002 | 			 * This drops the queue lock | 
 | 3003 | 			 */ | 
 | 3004 | 			if (q) | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 3005 | 				queue_unplugged(q, depth, from_schedule); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3006 | 			q = rq->q; | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 3007 | 			depth = 0; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3008 | 			spin_lock(q->queue_lock); | 
 | 3009 | 		} | 
| Tejun Heo | 8ba6143 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 3010 |  | 
 | 3011 | 		/* | 
 | 3012 | 		 * Short-circuit if @q is dying | 
 | 3013 | 		 */ | 
| Bart Van Assche | 3f3299d | 2012-11-28 13:42:38 +0100 | [diff] [blame] | 3014 | 		if (unlikely(blk_queue_dying(q))) { | 
| Tejun Heo | 8ba6143 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 3015 | 			__blk_end_request_all(rq, -ENODEV); | 
 | 3016 | 			continue; | 
 | 3017 | 		} | 
 | 3018 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3019 | 		/* | 
 | 3020 | 		 * rq is already accounted, so use raw insert | 
 | 3021 | 		 */ | 
| Jens Axboe | 401a18e | 2011-03-25 16:57:52 +0100 | [diff] [blame] | 3022 | 		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) | 
 | 3023 | 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); | 
 | 3024 | 		else | 
 | 3025 | 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 3026 |  | 
 | 3027 | 		depth++; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3028 | 	} | 
 | 3029 |  | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 3030 | 	/* | 
 | 3031 | 	 * This drops the queue lock | 
 | 3032 | 	 */ | 
 | 3033 | 	if (q) | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 3034 | 		queue_unplugged(q, depth, from_schedule); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3035 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3036 | 	local_irq_restore(flags); | 
 | 3037 | } | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3038 |  | 
 | 3039 | void blk_finish_plug(struct blk_plug *plug) | 
 | 3040 | { | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 3041 | 	blk_flush_plug_list(plug, false); | 
| Christoph Hellwig | 88b996c | 2011-04-15 15:20:10 +0200 | [diff] [blame] | 3042 |  | 
 | 3043 | 	if (plug == current->plug) | 
 | 3044 | 		current->plug = NULL; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 3045 | } | 
 | 3046 | EXPORT_SYMBOL(blk_finish_plug); | 
 | 3047 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3048 | int __init blk_dev_init(void) | 
 | 3049 | { | 
| Nikanth Karthikesan | 9eb55b0 | 2009-04-27 14:53:54 +0200 | [diff] [blame] | 3050 | 	BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 
 | 3051 | 			sizeof(((struct request *)0)->cmd_flags)); | 
 | 3052 |  | 
| Tejun Heo | 89b90be | 2011-01-03 15:01:47 +0100 | [diff] [blame] | 3053 | 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */ | 
 | 3054 | 	kblockd_workqueue = alloc_workqueue("kblockd", | 
 | 3055 | 					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3056 | 	if (!kblockd_workqueue) | 
 | 3057 | 		panic("Failed to create kblockd\n"); | 
 | 3058 |  | 
 | 3059 | 	request_cachep = kmem_cache_create("blkdev_requests", | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3060 | 			sizeof(struct request), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3061 |  | 
| Jens Axboe | 8324aa9 | 2008-01-29 14:51:59 +0100 | [diff] [blame] | 3062 | 	blk_requestq_cachep = kmem_cache_create("blkdev_queue", | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3063 | 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3064 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3065 | 	return 0; | 
 | 3066 | } |