/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

static int __make_request(struct request_queue *q, struct bio *bio);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

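/*
 * Per-partition I/O accounting. Does nothing unless I/O stats are
 * enabled for the request (blk_do_io_stat()). For a brand new I/O
 * (@new_io set) the partition time stats are rounded and the in-flight
 * count bumped; for a merge into an existing request only the merge
 * counter for the data direction is incremented. Runs under
 * part_stat_lock().
 */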
static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));

	if (!new_io)
		part_stat_inc(cpu, part, merges[rw]);
	else {
		part_round_stats(cpu, part);
		part_inc_in_flight(part);
	}

	part_stat_unlock();
}

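/*
 * Set the thresholds at which the queue is marked congested (on) and
 * uncongested (off). The on threshold sits just above ~7/8 of
 * nr_requests and the off threshold just below ~13/16, giving some
 * hysteresis. For the common default of nr_requests = 128 this works
 * out to nr_congestion_on = 113 and nr_congestion_off = 103.
 */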
void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
}
EXPORT_SYMBOL(blk_rq_init);

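/*
 * Complete @nbytes of @bio on behalf of @rq. For ordinary requests the
 * bio is advanced and, once fully completed, ended with bio_endio().
 * For the queue's in-flight barrier request only the error, if any, is
 * recorded in q->orderr; that bio is completed later by the barrier
 * machinery.
 */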
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	struct request_queue *q = rq->q;

	if (&q->bar_rq != rq) {
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __func__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}

		if (unlikely(rq->cmd_flags & REQ_QUIET))
			set_bit(BIO_QUIET, &bio->bi_flags);

		bio->bi_size -= nbytes;
		bio->bi_sector += (nbytes >> 9);

		if (bio_integrity(bio))
			bio_integrity_advance(bio, nbytes);

		if (bio->bi_size == 0)
			bio_endio(bio, error);
	} else {

		/*
		 * Okay, this is the barrier request in progress, just
		 * record the error.
		 */
		if (error && !q->orderr)
			q->orderr = error;
	}
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (blk_pc_request(rq)) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off, with no requests on the queue,
 * and with the queue lock held.
 */
void blk_plug_device(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (blk_queue_stopped(q))
		return;

	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		trace_block_plug(q);
	}
}
EXPORT_SYMBOL(blk_plug_device);

/**
 * blk_plug_device_unlocked - plug a device without queue lock held
 * @q:    The &struct request_queue to plug
 *
 * Description:
 *   Like blk_plug_device(), but grabs the queue lock and disables
 *   interrupts.
 **/
void blk_plug_device_unlocked(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_plug_device_unlocked);

/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}
EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;
	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
		return;

	q->request_fn(q);
}

/**
 * generic_unplug_device - fire a request queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   Linux uses plugging to build bigger request queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(struct request_queue *q)
{
	if (blk_queue_plugged(q)) {
		spin_lock_irq(q->queue_lock);
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, unplug_work);

	trace_block_unplug_io(q);
	q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	trace_block_unplug_timer(q);
	kblockd_schedule_work(q, &q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		trace_block_unplug_io(q);
		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);

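/*
 * Summary of the plug lifecycle implemented above: an idle queue is
 * plugged to batch requests, arming unplug_timer. The queue is
 * unplugged (invoking ->request_fn) when a buffer belonging to a
 * queued request is needed, when the timer fires and kblockd runs
 * unplug_work, or explicitly via generic_unplug_device()/blk_unplug().
 */
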
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See blk_run_queue(). This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	if (unlikely(blk_queue_stopped(q)))
		return;

	if (elv_queue_empty(q))
		return;

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
		kblockd_schedule_work(q, &q->unplug_work);
	}
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

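/*
 * Drop a reference to the queue, taken with blk_get_queue(). The queue
 * is freed once the last reference is put.
 */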
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}

void blk_cleanup_queue(struct request_queue *q)
{
	/*
	 * We know we have process context here, so we can be a little
	 * cautious and ensure that pending block actions on this device
	 * are done before moving on. Going into this function, we should
	 * not have processes doing IO to this device.
	 */
	blk_sync_queue(q);

	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

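/*
 * Set up the per-queue request_list: counters, wait queues, and the
 * mempool that request allocations are drawn from. Returns 0 on
 * success or -ENOMEM if the mempool cannot be created.
 */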
static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

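/*
 * Allocate a bare request_queue from blk_requestq_cachep on the given
 * NUMA node and initialize its backing_dev_info, timers, kobject, and
 * locks. No request list or elevator is set up here; see
 * blk_init_queue_node() for the full setup.
 */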
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";

	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	init_timer(&q->unplug_timer);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_WORK(&q->unplug_work, blk_unplug_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sort requests and coalesce adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will also be taken from interrupt context, so
 *    irq disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);

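/*
 * Illustrative sketch only (not part of this file): a minimal driver
 * pairing blk_init_queue() with blk_cleanup_queue(). All "mydev" names
 * are hypothetical.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			... transfer the data for rq here ...
 *			__blk_end_request_all(rq, 0);
 *		}
 *	}
 *
 *	static int __init mydev_init(void)
 *	{
 *		spin_lock_init(&mydev_lock);
 *		mydev_queue = blk_init_queue(mydev_request_fn, &mydev_lock);
 *		if (!mydev_queue)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void __exit mydev_exit(void)
 *	{
 *		blk_cleanup_queue(mydev_queue);
 *	}
 */
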
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= QUEUE_FLAG_DEFAULT;
	q->queue_lock		= lock;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, __make_request);

	q->sg_reserved_size = INT_MAX;

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);

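/*
 * Take a reference on the queue. Returns 0 on success, or 1 if the
 * queue is already marked dead and no reference was taken. Pair with
 * blk_put_queue().
 */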
int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

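/*
 * Allocate a request from the queue's mempool, mark it REQ_ALLOCED,
 * and, if @priv is set, attach elevator-private data. Returns NULL if
 * either allocation fails.
 */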
| Jens Axboe | 1ea25ec | 2006-07-18 22:24:11 +0200 | [diff] [blame] | 623 | static struct request * | 
| Jerome Marchand | 42dad76 | 2009-04-22 14:01:49 +0200 | [diff] [blame] | 624 | blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 625 | { | 
 | 626 | 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | 
 | 627 |  | 
 | 628 | 	if (!rq) | 
 | 629 | 		return NULL; | 
 | 630 |  | 
| FUJITA Tomonori | 2a4aa30 | 2008-04-29 09:54:36 +0200 | [diff] [blame] | 631 | 	blk_rq_init(q, rq); | 
| FUJITA Tomonori | 1afb20f | 2008-04-25 12:26:28 +0200 | [diff] [blame] | 632 |  | 
| Jerome Marchand | 42dad76 | 2009-04-22 14:01:49 +0200 | [diff] [blame] | 633 | 	rq->cmd_flags = flags | REQ_ALLOCED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 634 |  | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 635 | 	if (priv) { | 
| Jens Axboe | cb78b28 | 2006-07-28 09:32:57 +0200 | [diff] [blame] | 636 | 		if (unlikely(elv_set_request(q, rq, gfp_mask))) { | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 637 | 			mempool_free(rq, q->rq.rq_pool); | 
 | 638 | 			return NULL; | 
 | 639 | 		} | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 640 | 		rq->cmd_flags |= REQ_ELVPRIV; | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 641 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 642 |  | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 643 | 	return rq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 644 | } | 
 | 645 |  | 
 | 646 | /* | 
 | 647 |  * ioc_batching returns true if the ioc is a valid batching request and | 
 | 648 |  * should be given priority access to a request. | 
 | 649 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 650 | static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 651 | { | 
 | 652 | 	if (!ioc) | 
 | 653 | 		return 0; | 
 | 654 |  | 
 | 655 | 	/* | 
 | 656 | 	 * Make sure the process is able to allocate at least 1 request | 
 | 657 | 	 * even if the batch times out, otherwise we could theoretically | 
 | 658 | 	 * lose wakeups. | 
 | 659 | 	 */ | 
 | 660 | 	return ioc->nr_batch_requests == q->nr_batching || | 
 | 661 | 		(ioc->nr_batch_requests > 0 | 
 | 662 | 		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); | 
 | 663 | } | 
 | 664 |  | 
 | 665 | /* | 
 | 666 |  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This | 
 | 667 |  * will cause the process to be a "batcher" on all queues in the system. This | 
 | 668 |  * is the behaviour we want though - once it gets a wakeup it should be given | 
 | 669 |  * a nice run. | 
 | 670 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 671 | static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 672 | { | 
 | 673 | 	if (!ioc || ioc_batching(q, ioc)) | 
 | 674 | 		return; | 
 | 675 |  | 
 | 676 | 	ioc->nr_batch_requests = q->nr_batching; | 
 | 677 | 	ioc->last_waited = jiffies; | 
 | 678 | } | 
 | 679 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 680 | static void __freed_request(struct request_queue *q, int sync) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 681 | { | 
 | 682 | 	struct request_list *rl = &q->rq; | 
 | 683 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 684 | 	if (rl->count[sync] < queue_congestion_off_threshold(q)) | 
 | 685 | 		blk_clear_queue_congested(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 686 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 687 | 	if (rl->count[sync] + 1 <= q->nr_requests) { | 
 | 688 | 		if (waitqueue_active(&rl->wait[sync])) | 
 | 689 | 			wake_up(&rl->wait[sync]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 690 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 691 | 		blk_clear_queue_full(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 692 | 	} | 
 | 693 | } | 
 | 694 |  | 
 | 695 | /* | 
 | 696 |  * A request has just been released.  Account for it, update the full and | 
 | 697 |  * congestion status, wake up any waiters.   Called under q->queue_lock. | 
 | 698 |  */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 699 | static void freed_request(struct request_queue *q, int sync, int priv) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 700 | { | 
 | 701 | 	struct request_list *rl = &q->rq; | 
 | 702 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 703 | 	rl->count[sync]--; | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 704 | 	if (priv) | 
 | 705 | 		rl->elvpriv--; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 706 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 707 | 	__freed_request(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 708 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 709 | 	if (unlikely(rl->starved[sync ^ 1])) | 
 | 710 | 		__freed_request(q, sync ^ 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 711 | } | 
 | 712 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 713 | /* | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 714 |  * Get a free request, queue_lock must be held. | 
 | 715 |  * Returns NULL on failure, with queue_lock held. | 
 | 716 |  * Returns !NULL on success, with queue_lock *not held*. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 717 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 718 | static struct request *get_request(struct request_queue *q, int rw_flags, | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 719 | 				   struct bio *bio, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 720 | { | 
 | 721 | 	struct request *rq = NULL; | 
 | 722 | 	struct request_list *rl = &q->rq; | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 723 | 	struct io_context *ioc = NULL; | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 724 | 	const bool is_sync = rw_is_sync(rw_flags) != 0; | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 725 | 	int may_queue, priv; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 726 |  | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 727 | 	may_queue = elv_may_queue(q, rw_flags); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 728 | 	if (may_queue == ELV_MQUEUE_NO) | 
 | 729 | 		goto rq_starved; | 
 | 730 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 731 | 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { | 
 | 732 | 		if (rl->count[is_sync]+1 >= q->nr_requests) { | 
| Jens Axboe | b5deef9 | 2006-07-19 23:39:40 +0200 | [diff] [blame] | 733 | 			ioc = current_io_context(GFP_ATOMIC, q->node); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 734 | 			/* | 
 | 735 | 			 * The queue will fill after this allocation, so set | 
 | 736 | 			 * it as full, and mark this process as "batching". | 
 | 737 | 			 * This process will be allowed to complete a batch of | 
 | 738 | 			 * requests, others will be blocked. | 
 | 739 | 			 */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 740 | 			if (!blk_queue_full(q, is_sync)) { | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 741 | 				ioc_set_batching(q, ioc); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 742 | 				blk_set_queue_full(q, is_sync); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 743 | 			} else { | 
 | 744 | 				if (may_queue != ELV_MQUEUE_MUST | 
 | 745 | 						&& !ioc_batching(q, ioc)) { | 
 | 746 | 					/* | 
 | 747 | 					 * The queue is full and the allocating | 
 | 748 | 					 * process is not a "batcher", and not | 
 | 749 | 					 * exempted by the IO scheduler | 
 | 750 | 					 */ | 
 | 751 | 					goto out; | 
 | 752 | 				} | 
 | 753 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 754 | 		} | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 755 | 		blk_set_queue_congested(q, is_sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 756 | 	} | 
 | 757 |  | 
| Jens Axboe | 082cf69 | 2005-06-28 16:35:11 +0200 | [diff] [blame] | 758 | 	/* | 
 | 759 | 	 * Only allow batching queuers to allocate up to 50% over the defined | 
 | 760 | 	 * limit of requests, otherwise we could have thousands of requests | 
 | 761 | 	 * allocated with any setting of ->nr_requests | 
 | 762 | 	 */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 763 | 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) | 
| Jens Axboe | 082cf69 | 2005-06-28 16:35:11 +0200 | [diff] [blame] | 764 | 		goto out; | 
| Hugh Dickins | fd782a4 | 2005-06-29 15:15:40 +0100 | [diff] [blame] | 765 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 766 | 	rl->count[is_sync]++; | 
 | 767 | 	rl->starved[is_sync] = 0; | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 768 |  | 
| Jens Axboe | 64521d1 | 2005-10-28 08:30:39 +0200 | [diff] [blame] | 769 | 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 770 | 	if (priv) | 
 | 771 | 		rl->elvpriv++; | 
 | 772 |  | 
| Jerome Marchand | 42dad76 | 2009-04-22 14:01:49 +0200 | [diff] [blame] | 773 | 	if (blk_queue_io_stat(q)) | 
 | 774 | 		rw_flags |= REQ_IO_STAT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 775 | 	spin_unlock_irq(q->queue_lock); | 
 | 776 |  | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 777 | 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 778 | 	if (unlikely(!rq)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 779 | 		/* | 
 | 780 | 		 * Allocation failed presumably due to memory. Undo anything | 
 | 781 | 		 * we might have messed up. | 
 | 782 | 		 * | 
 | 783 | 		 * Allocating task should really be put onto the front of the | 
 | 784 | 		 * wait queue, but this is pretty rare. | 
 | 785 | 		 */ | 
 | 786 | 		spin_lock_irq(q->queue_lock); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 787 | 		freed_request(q, is_sync, priv); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 788 |  | 
 | 789 | 		/* | 
 | 790 | 		 * in the very unlikely event that allocation failed and no | 
 | 791 | 		 * requests for this direction was pending, mark us starved | 
 | 792 | 		 * so that freeing of a request in the other direction will | 
 | 793 | 		 * notice us. another possible fix would be to split the | 
 | 794 | 		 * rq mempool into READ and WRITE | 
 | 795 | 		 */ | 
 | 796 | rq_starved: | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 797 | 		if (unlikely(rl->count[is_sync] == 0)) | 
 | 798 | 			rl->starved[is_sync] = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 799 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 800 | 		goto out; | 
 | 801 | 	} | 
 | 802 |  | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 803 | 	/* | 
 | 804 | 	 * ioc may be NULL here, and ioc_batching will be false. That's | 
 | 805 | 	 * OK, if the queue is under the request limit then requests need | 
 | 806 | 	 * not count toward the nr_batch_requests limit. There will always | 
 | 807 | 	 * be some limit enforced by BLK_BATCH_TIME. | 
 | 808 | 	 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 809 | 	if (ioc_batching(q, ioc)) | 
 | 810 | 		ioc->nr_batch_requests--; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 811 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 812 | 	trace_block_getrq(q, bio, rw_flags & 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 813 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 814 | 	return rq; | 
 | 815 | } | 
 | 816 |  | 
 | 817 | /* | 
 | 818 |  * No available requests for this queue, unplug the device and wait for some | 
 | 819 |  * requests to become available. | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 820 |  * | 
 | 821 |  * Called with q->queue_lock held, and returns with it unlocked. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 822 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 823 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 824 | 					struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 825 | { | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 826 | 	const bool is_sync = rw_is_sync(rw_flags) != 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 827 | 	struct request *rq; | 
 | 828 |  | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 829 | 	rq = get_request(q, rw_flags, bio, GFP_NOIO); | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 830 | 	while (!rq) { | 
 | 831 | 		DEFINE_WAIT(wait); | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 832 | 		struct io_context *ioc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 833 | 		struct request_list *rl = &q->rq; | 
 | 834 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 835 | 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 836 | 				TASK_UNINTERRUPTIBLE); | 
 | 837 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 838 | 		trace_block_sleeprq(q, bio, rw_flags & 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 |  | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 840 | 		__generic_unplug_device(q); | 
 | 841 | 		spin_unlock_irq(q->queue_lock); | 
 | 842 | 		io_schedule(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 843 |  | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 844 | 		/* | 
 | 845 | 		 * After sleeping, we become a "batching" process and | 
 | 846 | 		 * will be able to allocate at least one request, and | 
 | 847 | 		 * up to a big batch of them for a small period of time. | 
 | 848 | 		 * See ioc_batching, ioc_set_batching. | 
 | 849 | 		 */ | 
 | 850 | 		ioc = current_io_context(GFP_NOIO, q->node); | 
 | 851 | 		ioc_set_batching(q, ioc); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 852 |  | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 853 | 		spin_lock_irq(q->queue_lock); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 854 | 		finish_wait(&rl->wait[is_sync], &wait); | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 855 |  | 
 | 856 | 		rq = get_request(q, rw_flags, bio, GFP_NOIO); | 
 | 857 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 858 |  | 
 | 859 | 	return rq; | 
 | 860 | } | 
 | 861 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 862 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 | { | 
 | 864 | 	struct request *rq; | 
 | 865 |  | 
 | 866 | 	BUG_ON(rw != READ && rw != WRITE); | 
 | 867 |  | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 868 | 	spin_lock_irq(q->queue_lock); | 
 | 869 | 	if (gfp_mask & __GFP_WAIT) { | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 870 | 		rq = get_request_wait(q, rw, NULL); | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 871 | 	} else { | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 872 | 		rq = get_request(q, rw, NULL, gfp_mask); | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 873 | 		if (!rq) | 
 | 874 | 			spin_unlock_irq(q->queue_lock); | 
 | 875 | 	} | 
 | 876 | 	/* q->queue_lock is unlocked at this point */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 |  | 
 | 878 | 	return rq; | 
 | 879 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 880 | EXPORT_SYMBOL(blk_get_request); | 
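
/*
 * A minimal illustrative sketch (not part of this file): how a driver
 * might pair blk_get_request() with blk_put_request() to issue a simple
 * BLOCK_PC command.  The opcode, command length and timeout below are
 * made-up placeholders, and error handling is kept to the bare minimum.
 */
static int example_send_pc_command(struct request_queue *q)
{
	struct request *rq;
	int err;

	/* __GFP_WAIT is set, so this takes the get_request_wait() path */
	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = 0x00;		/* hypothetical opcode */
	rq->cmd_len = 6;
	rq->timeout = 60 * HZ;

	/* insert at the tail of the queue and wait for completion */
	err = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return err;
}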
 | 881 |  | 
 | 882 | /** | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 883 |  * blk_make_request - given a bio, allocate a corresponding struct request. | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 884 |  * @q: target request queue | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 885 |  * @bio:  The bio describing the memory mappings that will be submitted for IO. | 
 | 886 |  *        It may be a chained bio properly constructed by the block/bio layer. | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 887 |  * @gfp_mask: gfp flags to be used for memory allocation | 
| Jens Axboe | dc72ef4a | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 888 |  * | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 889 |  * blk_make_request is the parallel of generic_make_request for BLOCK_PC | 
 | 890 |  * type commands, where the struct request needs to be further initialized by | 
 | 891 |  * the caller. It is passed a &struct bio, which describes the memory info of | 
 | 892 |  * the I/O transfer. | 
 | 893 |  * | 
 | 894 |  * The caller of blk_make_request must make sure that bi_io_vec | 
 | 895 |  * is set to describe the memory buffers, that bio_data_dir() returns | 
 | 896 |  * the needed direction of the request, and that all bios in the passed | 
 | 897 |  * bio chain are properly set up accordingly. | 
 | 898 |  * | 
 | 899 |  * If called under non-sleepable conditions, the mapped bio buffers must not | 
 | 900 |  * need bouncing; allocate them with the appropriate mask or flags | 
 | 901 |  * suitable for the target device. Otherwise the call to blk_queue_bounce will | 
 | 902 |  * BUG. | 
| Jens Axboe | 53674ac | 2009-05-19 19:52:35 +0200 | [diff] [blame] | 903 |  * | 
 | 904 |  * WARNING: When allocating/cloning a bio-chain, careful consideration should be | 
 | 905 |  * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for | 
 | 906 |  * anything but the first bio in the chain. Otherwise you risk waiting for IO | 
 | 907 |  * completion of a bio that hasn't been submitted yet, thus resulting in a | 
 | 908 |  * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead | 
 | 909 |  * of bio_alloc(), as that avoids the mempool deadlock. | 
 | 910 |  * If possible a big IO should be split into smaller parts when allocation | 
 | 911 |  * fails. Partial allocation should not be an error, or you risk a live-lock. | 
| Jens Axboe | dc72ef4a | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 912 |  */ | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 913 | struct request *blk_make_request(struct request_queue *q, struct bio *bio, | 
 | 914 | 				 gfp_t gfp_mask) | 
| Jens Axboe | dc72ef4a | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 915 | { | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 916 | 	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); | 
 | 917 |  | 
 | 918 | 	if (unlikely(!rq)) | 
 | 919 | 		return ERR_PTR(-ENOMEM); | 
 | 920 |  | 
 | 921 | 	for_each_bio(bio) { | 
 | 922 | 		struct bio *bounce_bio = bio; | 
 | 923 | 		int ret; | 
 | 924 |  | 
 | 925 | 		blk_queue_bounce(q, &bounce_bio); | 
 | 926 | 		ret = blk_rq_append_bio(q, rq, bounce_bio); | 
 | 927 | 		if (unlikely(ret)) { | 
 | 928 | 			blk_put_request(rq); | 
 | 929 | 			return ERR_PTR(ret); | 
 | 930 | 		} | 
 | 931 | 	} | 
 | 932 |  | 
 | 933 | 	return rq; | 
| Jens Axboe | dc72ef4a | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 934 | } | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 935 | EXPORT_SYMBOL(blk_make_request); | 
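
/*
 * A minimal illustrative sketch (not part of this file): a caller that has
 * already built a bio chain describing its buffers maps it to a BLOCK_PC
 * request with blk_make_request() and finishes the initialization itself.
 * The opcode, command length and timeout are hypothetical placeholders.
 */
static struct request *example_bio_to_pc_request(struct request_queue *q,
						 struct bio *bio)
{
	struct request *rq = blk_make_request(q, bio, GFP_KERNEL);

	if (IS_ERR(rq))
		return rq;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = 0x00;		/* hypothetical opcode */
	rq->cmd_len = 10;
	rq->timeout = 30 * HZ;

	/* the caller would typically submit this via blk_execute_rq() */
	return rq;
}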
| Jens Axboe | dc72ef4a | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 936 |  | 
 | 937 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 938 |  * blk_requeue_request - put a request back on queue | 
 | 939 |  * @q:		request queue where request should be inserted | 
 | 940 |  * @rq:		request to be inserted | 
 | 941 |  * | 
 | 942 |  * Description: | 
 | 943 |  *    Drivers often keep queueing requests until the hardware cannot accept | 
 | 944 |  *    more; when that condition happens we need to put the request back | 
 | 945 |  *    on the queue. Must be called with queue lock held. | 
 | 946 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 947 | void blk_requeue_request(struct request_queue *q, struct request *rq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | { | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 949 | 	blk_delete_timer(rq); | 
 | 950 | 	blk_clear_rq_complete(rq); | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 951 | 	trace_block_rq_requeue(q, rq); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 952 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 953 | 	if (blk_rq_tagged(rq)) | 
 | 954 | 		blk_queue_end_tag(q, rq); | 
 | 955 |  | 
| James Bottomley | ba396a6 | 2009-05-27 14:17:08 +0200 | [diff] [blame] | 956 | 	BUG_ON(blk_queued_rq(rq)); | 
 | 957 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 958 | 	elv_requeue_request(q, rq); | 
 | 959 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | EXPORT_SYMBOL(blk_requeue_request); | 
 | 961 |  | 
 | 962 | /** | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 963 |  * blk_insert_request - insert a special request into a request queue | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 |  * @q:		request queue where request should be inserted | 
 | 965 |  * @rq:		request to be inserted | 
 | 966 |  * @at_head:	insert request at head or tail of queue | 
 | 967 |  * @data:	private data | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 |  * | 
 | 969 |  * Description: | 
 | 970 |  *    Many block devices need to execute commands asynchronously, so they don't | 
 | 971 |  *    block the whole kernel from preemption during request execution.  This is | 
 | 972 |  *    accomplished normally by inserting artificial requests tagged as | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 973 |  *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them | 
 | 974 |  *    be scheduled for actual execution by the request queue. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 |  * | 
 | 976 |  *    We have the option of inserting the head or the tail of the queue. | 
 | 977 |  *    Typically we use the tail for new ioctls and so forth.  We use the head | 
 | 978 |  *    of the queue for things like a QUEUE_FULL message from a device, or a | 
 | 979 |  *    host that is unable to accept a particular command. | 
 | 980 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 981 | void blk_insert_request(struct request_queue *q, struct request *rq, | 
| Tejun Heo  | 867d119 | 2005-04-24 02:06:05 -0500 | [diff] [blame] | 982 | 			int at_head, void *data) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | { | 
| Tejun Heo  | 867d119 | 2005-04-24 02:06:05 -0500 | [diff] [blame] | 984 | 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | 	unsigned long flags; | 
 | 986 |  | 
 | 987 | 	/* | 
 | 988 | 	 * tell the I/O scheduler that this isn't a regular read/write (ie it | 
 | 989 | 	 * must not attempt merges on this) and that it acts as a soft | 
 | 990 | 	 * barrier | 
 | 991 | 	 */ | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 992 | 	rq->cmd_type = REQ_TYPE_SPECIAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 |  | 
 | 994 | 	rq->special = data; | 
 | 995 |  | 
 | 996 | 	spin_lock_irqsave(q->queue_lock, flags); | 
 | 997 |  | 
 | 998 | 	/* | 
 | 999 | 	 * If command is tagged, release the tag | 
 | 1000 | 	 */ | 
| Tejun Heo  | 867d119 | 2005-04-24 02:06:05 -0500 | [diff] [blame] | 1001 | 	if (blk_rq_tagged(rq)) | 
 | 1002 | 		blk_queue_end_tag(q, rq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 |  | 
| Jerome Marchand | b238b3d | 2007-10-23 15:05:46 +0200 | [diff] [blame] | 1004 | 	drive_stat_acct(rq, 1); | 
| Tejun Heo  | 867d119 | 2005-04-24 02:06:05 -0500 | [diff] [blame] | 1005 | 	__elv_add_request(q, rq, where, 0); | 
| Tejun Heo | a7f5579 | 2009-04-23 11:05:17 +0900 | [diff] [blame] | 1006 | 	__blk_run_queue(q); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 1008 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | EXPORT_SYMBOL(blk_insert_request); | 
 | 1010 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1011 | /* | 
 | 1012 |  * add-request adds a request to the linked list. | 
 | 1013 |  * The queue lock is held and interrupts are disabled, as we muck with the | 
 | 1014 |  * request queue list. | 
 | 1015 |  */ | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1016 | static inline void add_request(struct request_queue *q, struct request *req) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | { | 
| Jerome Marchand | b238b3d | 2007-10-23 15:05:46 +0200 | [diff] [blame] | 1018 | 	drive_stat_acct(req, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | 	/* | 
 | 1021 | 	 * elevator indicated where it wants this request to be | 
 | 1022 | 	 * inserted at elevator_merge time | 
 | 1023 | 	 */ | 
 | 1024 | 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); | 
 | 1025 | } | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1026 |  | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1027 | static void part_round_stats_single(int cpu, struct hd_struct *part, | 
 | 1028 | 				    unsigned long now) | 
 | 1029 | { | 
 | 1030 | 	if (now == part->stamp) | 
 | 1031 | 		return; | 
 | 1032 |  | 
 | 1033 | 	if (part->in_flight) { | 
 | 1034 | 		__part_stat_add(cpu, part, time_in_queue, | 
| Jens Axboe | 0f78ab9 | 2009-10-04 21:04:38 +0200 | [diff] [blame] | 1035 | 				part->in_flight * (now - part->stamp)); | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1036 | 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp)); | 
 | 1037 | 	} | 
 | 1038 | 	part->stamp = now; | 
 | 1039 | } | 
 | 1040 |  | 
 | 1041 | /** | 
| Randy Dunlap | 496aa8a | 2008-10-16 07:46:23 +0200 | [diff] [blame] | 1042 |  * part_round_stats() - Round off the performance stats on a struct disk_stats. | 
 | 1043 |  * @cpu: cpu number for stats access | 
 | 1044 |  * @part: target partition | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1045 |  * | 
 | 1046 |  * The average IO queue length and utilisation statistics are maintained | 
 | 1047 |  * by observing the current state of the queue length and the amount of | 
 | 1048 |  * time it has been in this state. | 
 | 1049 |  * | 
 | 1050 |  * Normally, that accounting is done on IO completion, but that can result | 
 | 1051 |  * in more than a second's worth of IO being accounted for within any one | 
 | 1052 |  * second, leading to >100% utilisation.  To deal with that, we call this | 
 | 1053 |  * function to do a round-off before returning the results when reading | 
 | 1054 |  * /proc/diskstats.  This accounts immediately for all queue usage up to | 
 | 1055 |  * the current jiffies and restarts the counters again. | 
 | 1056 |  */ | 
| Tejun Heo | c995905 | 2008-08-25 19:47:21 +0900 | [diff] [blame] | 1057 | void part_round_stats(int cpu, struct hd_struct *part) | 
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1058 | { | 
 | 1059 | 	unsigned long now = jiffies; | 
 | 1060 |  | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1061 | 	if (part->partno) | 
 | 1062 | 		part_round_stats_single(cpu, &part_to_disk(part)->part0, now); | 
 | 1063 | 	part_round_stats_single(cpu, part, now); | 
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1064 | } | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1065 | EXPORT_SYMBOL_GPL(part_round_stats); | 
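
/*
 * A minimal illustrative sketch (not part of this file): a stats reader
 * rounding off a partition's counters before sampling them, in the same
 * way the /proc/diskstats code does.
 */
static void example_sample_part_stats(struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	/* part_stat_read(part, ios[READ]) etc. are now up to date */
	part_stat_unlock();
}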
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1066 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | /* | 
 | 1068 |  * queue lock must be held | 
 | 1069 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1070 | void __blk_put_request(struct request_queue *q, struct request *req) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | 	if (unlikely(!q)) | 
 | 1073 | 		return; | 
 | 1074 | 	if (unlikely(--req->ref_count)) | 
 | 1075 | 		return; | 
 | 1076 |  | 
| Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 1077 | 	elv_completed_request(q, req); | 
 | 1078 |  | 
| Boaz Harrosh | 1cd96c2 | 2009-03-24 12:35:07 +0100 | [diff] [blame] | 1079 | 	/* this is a bio leak */ | 
 | 1080 | 	WARN_ON(req->bio != NULL); | 
 | 1081 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1082 | 	/* | 
 | 1083 | 	 * Request may not have originated from ll_rw_blk. If not, | 
 | 1084 | 	 * it didn't come out of our reserved rq pools. | 
 | 1085 | 	 */ | 
| Jens Axboe | 49171e5 | 2006-08-10 08:59:11 +0200 | [diff] [blame] | 1086 | 	if (req->cmd_flags & REQ_ALLOCED) { | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 1087 | 		int is_sync = rq_is_sync(req) != 0; | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 1088 | 		int priv = req->cmd_flags & REQ_ELVPRIV; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | 		BUG_ON(!list_empty(&req->queuelist)); | 
| Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 1091 | 		BUG_ON(!hlist_unhashed(&req->hash)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 |  | 
 | 1093 | 		blk_free_request(q, req); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 1094 | 		freed_request(q, is_sync, priv); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | 	} | 
 | 1096 | } | 
| Mike Christie | 6e39b69 | 2005-11-11 05:30:24 -0600 | [diff] [blame] | 1097 | EXPORT_SYMBOL_GPL(__blk_put_request); | 
 | 1098 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | void blk_put_request(struct request *req) | 
 | 1100 | { | 
| Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 1101 | 	unsigned long flags; | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1102 | 	struct request_queue *q = req->q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 |  | 
| FUJITA Tomonori | 52a93ba | 2008-07-15 21:21:45 +0200 | [diff] [blame] | 1104 | 	spin_lock_irqsave(q->queue_lock, flags); | 
 | 1105 | 	__blk_put_request(q, req); | 
 | 1106 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1107 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | EXPORT_SYMBOL(blk_put_request); | 
 | 1109 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 1110 | void init_request_from_bio(struct request *req, struct bio *bio) | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1111 | { | 
| Jens Axboe | c7c22e4 | 2008-09-13 20:26:01 +0200 | [diff] [blame] | 1112 | 	req->cpu = bio->bi_comp_cpu; | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 1113 | 	req->cmd_type = REQ_TYPE_FS; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1114 |  | 
 | 1115 | 	/* | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 1116 | 	 * Inherit FAILFAST from bio (for read-ahead, and explicit | 
 | 1117 | 	 * FAILFAST).  FAILFAST flags are identical for req and bio. | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1118 | 	 */ | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1119 | 	if (bio_rw_flagged(bio, BIO_RW_AHEAD)) | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 1120 | 		req->cmd_flags |= REQ_FAILFAST_MASK; | 
 | 1121 | 	else | 
 | 1122 | 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1123 |  | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1124 | 	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) { | 
| David Woodhouse | e17fc0a | 2008-08-09 16:42:20 +0100 | [diff] [blame] | 1125 | 		req->cmd_flags |= REQ_DISCARD; | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1126 | 		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) | 
| David Woodhouse | e17fc0a | 2008-08-09 16:42:20 +0100 | [diff] [blame] | 1127 | 			req->cmd_flags |= REQ_SOFTBARRIER; | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1128 | 	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) | 
| Tejun Heo | e4025f6 | 2009-04-23 11:05:17 +0900 | [diff] [blame] | 1129 | 		req->cmd_flags |= REQ_HARDBARRIER; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1130 |  | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1131 | 	if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 1132 | 		req->cmd_flags |= REQ_RW_SYNC; | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1133 | 	if (bio_rw_flagged(bio, BIO_RW_META)) | 
| Jens Axboe | 5404bc7 | 2006-08-10 09:01:02 +0200 | [diff] [blame] | 1134 | 		req->cmd_flags |= REQ_RW_META; | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1135 | 	if (bio_rw_flagged(bio, BIO_RW_NOIDLE)) | 
| Jens Axboe | aeb6faf | 2009-04-06 14:48:07 +0200 | [diff] [blame] | 1136 | 		req->cmd_flags |= REQ_NOIDLE; | 
| Jens Axboe | b31dc66 | 2006-06-13 08:26:10 +0200 | [diff] [blame] | 1137 |  | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1138 | 	req->errors = 0; | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1139 | 	req->__sector = bio->bi_sector; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1140 | 	req->ioprio = bio_prio(bio); | 
| NeilBrown | bc1c56f | 2007-08-16 13:31:30 +0200 | [diff] [blame] | 1141 | 	blk_rq_bio_prep(req->q, req, bio); | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1142 | } | 
 | 1143 |  | 
| Jens Axboe | 644b2d9 | 2009-04-06 14:48:06 +0200 | [diff] [blame] | 1144 | /* | 
 | 1145 |  * Only disable plugging for non-rotational devices if they do tagging | 
 | 1146 |  * as well; otherwise we do need the proper merging. | 
 | 1147 |  */ | 
 | 1148 | static inline bool queue_should_plug(struct request_queue *q) | 
 | 1149 | { | 
| Jens Axboe | fb1e753 | 2009-07-30 08:18:24 +0200 | [diff] [blame] | 1150 | 	return !(blk_queue_nonrot(q) && blk_queue_queuing(q)); | 
| Jens Axboe | 644b2d9 | 2009-04-06 14:48:06 +0200 | [diff] [blame] | 1151 | } | 
 | 1152 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1153 | static int __make_request(struct request_queue *q, struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | { | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1155 | 	struct request *req; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 1156 | 	int el_ret; | 
 | 1157 | 	unsigned int bytes = bio->bi_size; | 
| Jens Axboe | 51da90f | 2006-07-18 04:14:45 +0200 | [diff] [blame] | 1158 | 	const unsigned short prio = bio_prio(bio); | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1159 | 	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); | 
 | 1160 | 	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG); | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1161 | 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1162 | 	int rw_flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1163 |  | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1164 | 	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) && | 
| NeilBrown | db64f68 | 2009-06-30 09:35:44 +0200 | [diff] [blame] | 1165 | 	    (q->next_ordered == QUEUE_ORDERED_NONE)) { | 
 | 1166 | 		bio_endio(bio, -EOPNOTSUPP); | 
 | 1167 | 		return 0; | 
 | 1168 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | 	/* | 
 | 1170 | 	 * The low level driver can indicate that it wants pages above a | 
 | 1171 | 	 * certain limit bounced to low memory (ie for highmem, or even | 
 | 1172 | 	 * ISA dma in theory) | 
 | 1173 | 	 */ | 
 | 1174 | 	blk_queue_bounce(q, &bio); | 
 | 1175 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | 	spin_lock_irq(q->queue_lock); | 
 | 1177 |  | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1178 | 	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | 		goto get_rq; | 
 | 1180 |  | 
 | 1181 | 	el_ret = elv_merge(q, &req, bio); | 
 | 1182 | 	switch (el_ret) { | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1183 | 	case ELEVATOR_BACK_MERGE: | 
 | 1184 | 		BUG_ON(!rq_mergeable(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1186 | 		if (!ll_back_merge_fn(q, req, bio)) | 
 | 1187 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 |  | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 1189 | 		trace_block_bio_backmerge(q, bio); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1190 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1191 | 		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | 
 | 1192 | 			blk_rq_set_mixed_merge(req); | 
 | 1193 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1194 | 		req->biotail->bi_next = bio; | 
 | 1195 | 		req->biotail = bio; | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1196 | 		req->__data_len += bytes; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1197 | 		req->ioprio = ioprio_best(req->ioprio, prio); | 
| Jens Axboe | ab780f1 | 2008-08-26 10:25:02 +0200 | [diff] [blame] | 1198 | 		if (!blk_rq_cpu_valid(req)) | 
 | 1199 | 			req->cpu = bio->bi_comp_cpu; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1200 | 		drive_stat_acct(req, 0); | 
 | 1201 | 		if (!attempt_back_merge(q, req)) | 
 | 1202 | 			elv_merged_request(q, req, el_ret); | 
 | 1203 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1205 | 	case ELEVATOR_FRONT_MERGE: | 
 | 1206 | 		BUG_ON(!rq_mergeable(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1208 | 		if (!ll_front_merge_fn(q, req, bio)) | 
 | 1209 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 |  | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 1211 | 		trace_block_bio_frontmerge(q, bio); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1212 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1213 | 		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) { | 
 | 1214 | 			blk_rq_set_mixed_merge(req); | 
 | 1215 | 			req->cmd_flags &= ~REQ_FAILFAST_MASK; | 
 | 1216 | 			req->cmd_flags |= ff; | 
 | 1217 | 		} | 
 | 1218 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1219 | 		bio->bi_next = req->bio; | 
 | 1220 | 		req->bio = bio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1222 | 		/* | 
 | 1223 | 		 * may not be valid. If the low level driver said | 
 | 1224 | 		 * it didn't need a bounce buffer then it had better | 
 | 1225 | 		 * not touch req->buffer either... | 
 | 1226 | 		 */ | 
 | 1227 | 		req->buffer = bio_data(bio); | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1228 | 		req->__sector = bio->bi_sector; | 
 | 1229 | 		req->__data_len += bytes; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1230 | 		req->ioprio = ioprio_best(req->ioprio, prio); | 
| Jens Axboe | ab780f1 | 2008-08-26 10:25:02 +0200 | [diff] [blame] | 1231 | 		if (!blk_rq_cpu_valid(req)) | 
 | 1232 | 			req->cpu = bio->bi_comp_cpu; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1233 | 		drive_stat_acct(req, 0); | 
 | 1234 | 		if (!attempt_front_merge(q, req)) | 
 | 1235 | 			elv_merged_request(q, req, el_ret); | 
 | 1236 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1238 | 	/* ELV_NO_MERGE: elevator says don't/can't merge. */ | 
 | 1239 | 	default: | 
 | 1240 | 		; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | 	} | 
 | 1242 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1243 | get_rq: | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1244 | 	/* | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1245 | 	 * This sync check and mask will be re-done in init_request_from_bio(), | 
 | 1246 | 	 * but we need to set it earlier to expose the sync flag to the | 
 | 1247 | 	 * rq allocator and io schedulers. | 
 | 1248 | 	 */ | 
 | 1249 | 	rw_flags = bio_data_dir(bio); | 
 | 1250 | 	if (sync) | 
 | 1251 | 		rw_flags |= REQ_RW_SYNC; | 
 | 1252 |  | 
 | 1253 | 	/* | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1254 | 	 * Grab a free request. This might sleep but cannot fail. | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1255 | 	 * Returns with the queue unlocked. | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1256 | 	 */ | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1257 | 	req = get_request_wait(q, rw_flags, bio); | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1258 |  | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1259 | 	/* | 
 | 1260 | 	 * After dropping the lock and possibly sleeping here, our request | 
 | 1261 | 	 * may now be mergeable after it had proven unmergeable (above). | 
 | 1262 | 	 * We don't worry about that case for efficiency. It won't happen | 
 | 1263 | 	 * often, and the elevators are able to handle it. | 
 | 1264 | 	 */ | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1265 | 	init_request_from_bio(req, bio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 |  | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1267 | 	spin_lock_irq(q->queue_lock); | 
| Jens Axboe | c7c22e4 | 2008-09-13 20:26:01 +0200 | [diff] [blame] | 1268 | 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || | 
 | 1269 | 	    bio_flagged(bio, BIO_CPU_AFFINE)) | 
 | 1270 | 		req->cpu = blk_cpu_to_group(smp_processor_id()); | 
| Jens Axboe | 644b2d9 | 2009-04-06 14:48:06 +0200 | [diff] [blame] | 1271 | 	if (queue_should_plug(q) && elv_queue_empty(q)) | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1272 | 		blk_plug_device(q); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1273 | 	add_request(q, req); | 
 | 1274 | out: | 
| Jens Axboe | 644b2d9 | 2009-04-06 14:48:06 +0200 | [diff] [blame] | 1275 | 	if (unplug || !queue_should_plug(q)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | 		__generic_unplug_device(q); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | 	spin_unlock_irq(q->queue_lock); | 
 | 1278 | 	return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | } | 
 | 1280 |  | 
 | 1281 | /* | 
 | 1282 |  * If bio->bi_bdev is a partition, remap the location | 
 | 1283 |  */ | 
 | 1284 | static inline void blk_partition_remap(struct bio *bio) | 
 | 1285 | { | 
 | 1286 | 	struct block_device *bdev = bio->bi_bdev; | 
 | 1287 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1288 | 	if (bio_sectors(bio) && bdev != bdev->bd_contains) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | 		struct hd_struct *p = bdev->bd_part; | 
| Jens Axboe | a362357 | 2005-11-01 09:26:16 +0100 | [diff] [blame] | 1290 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | 		bio->bi_sector += p->start_sect; | 
 | 1292 | 		bio->bi_bdev = bdev->bd_contains; | 
| Alan D. Brunelle | c7149d6 | 2007-08-07 15:30:23 +0200 | [diff] [blame] | 1293 |  | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 1294 | 		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, | 
| Alan D. Brunelle | 22a7c31 | 2009-05-04 16:35:08 -0400 | [diff] [blame] | 1295 | 				    bdev->bd_dev, | 
| Alan D. Brunelle | c7149d6 | 2007-08-07 15:30:23 +0200 | [diff] [blame] | 1296 | 				    bio->bi_sector - p->start_sect); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | 	} | 
 | 1298 | } | 
 | 1299 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | static void handle_bad_sector(struct bio *bio) | 
 | 1301 | { | 
 | 1302 | 	char b[BDEVNAME_SIZE]; | 
 | 1303 |  | 
 | 1304 | 	printk(KERN_INFO "attempt to access beyond end of device\n"); | 
 | 1305 | 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | 
 | 1306 | 			bdevname(bio->bi_bdev, b), | 
 | 1307 | 			bio->bi_rw, | 
 | 1308 | 			(unsigned long long)bio->bi_sector + bio_sectors(bio), | 
 | 1309 | 			(long long)(bio->bi_bdev->bd_inode->i_size >> 9)); | 
 | 1310 |  | 
 | 1311 | 	set_bit(BIO_EOF, &bio->bi_flags); | 
 | 1312 | } | 
 | 1313 |  | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1314 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 
 | 1315 |  | 
 | 1316 | static DECLARE_FAULT_ATTR(fail_make_request); | 
 | 1317 |  | 
 | 1318 | static int __init setup_fail_make_request(char *str) | 
 | 1319 | { | 
 | 1320 | 	return setup_fault_attr(&fail_make_request, str); | 
 | 1321 | } | 
 | 1322 | __setup("fail_make_request=", setup_fail_make_request); | 
 | 1323 |  | 
 | 1324 | static int should_fail_request(struct bio *bio) | 
 | 1325 | { | 
| Tejun Heo | eddb2e2 | 2008-08-25 19:56:13 +0900 | [diff] [blame] | 1326 | 	struct hd_struct *part = bio->bi_bdev->bd_part; | 
 | 1327 |  | 
 | 1328 | 	if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail) | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1329 | 		return should_fail(&fail_make_request, bio->bi_size); | 
 | 1330 |  | 
 | 1331 | 	return 0; | 
 | 1332 | } | 
 | 1333 |  | 
 | 1334 | static int __init fail_make_request_debugfs(void) | 
 | 1335 | { | 
 | 1336 | 	return init_fault_attr_dentries(&fail_make_request, | 
 | 1337 | 					"fail_make_request"); | 
 | 1338 | } | 
 | 1339 |  | 
 | 1340 | late_initcall(fail_make_request_debugfs); | 
 | 1341 |  | 
 | 1342 | #else /* CONFIG_FAIL_MAKE_REQUEST */ | 
 | 1343 |  | 
 | 1344 | static inline int should_fail_request(struct bio *bio) | 
 | 1345 | { | 
 | 1346 | 	return 0; | 
 | 1347 | } | 
 | 1348 |  | 
 | 1349 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | 
 | 1350 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1351 | /* | 
 | 1352 |  * Check whether this bio extends beyond the end of the device. | 
 | 1353 |  */ | 
 | 1354 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | 
 | 1355 | { | 
 | 1356 | 	sector_t maxsector; | 
 | 1357 |  | 
 | 1358 | 	if (!nr_sectors) | 
 | 1359 | 		return 0; | 
 | 1360 |  | 
 | 1361 | 	/* Test device or partition size, when known. */ | 
 | 1362 | 	maxsector = bio->bi_bdev->bd_inode->i_size >> 9; | 
 | 1363 | 	if (maxsector) { | 
 | 1364 | 		sector_t sector = bio->bi_sector; | 
 | 1365 |  | 
 | 1366 | 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | 
 | 1367 | 			/* | 
 | 1368 | 			 * This may well happen - the kernel calls bread() | 
 | 1369 | 			 * without checking the size of the device, e.g., when | 
 | 1370 | 			 * mounting a device. | 
 | 1371 | 			 */ | 
 | 1372 | 			handle_bad_sector(bio); | 
 | 1373 | 			return 1; | 
 | 1374 | 		} | 
 | 1375 | 	} | 
 | 1376 |  | 
 | 1377 | 	return 0; | 
 | 1378 | } | 
 | 1379 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | /** | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1381 |  * generic_make_request - hand a buffer to its device driver for I/O | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 |  * @bio:  The bio describing the location in memory and on the device. | 
 | 1383 |  * | 
 | 1384 |  * generic_make_request() is used to make I/O requests of block | 
 | 1385 |  * devices. It is passed a &struct bio, which describes the I/O that needs | 
 | 1386 |  * to be done. | 
 | 1387 |  * | 
 | 1388 |  * generic_make_request() does not return any status.  The | 
 | 1389 |  * success/failure status of the request, along with notification of | 
 | 1390 |  * completion, is delivered asynchronously through the bio->bi_end_io | 
 | 1391 |  * function described (one day) elsewhere. | 
 | 1392 |  * | 
 | 1393 |  * The caller of generic_make_request must make sure that bi_io_vec | 
 | 1394 |  * are set to describe the memory buffer, and that bi_dev and bi_sector are | 
 | 1395 |  * set to describe the device address, and the | 
 | 1396 |  * bi_end_io and optionally bi_private are set to describe how | 
 | 1397 |  * completion notification should be signaled. | 
 | 1398 |  * | 
 | 1399 |  * generic_make_request and the drivers it calls may use bi_next if this | 
 | 1400 |  * bio happens to be merged with someone else, and may change bi_dev and | 
 | 1401 |  * bi_sector for remaps as it sees fit.  So the values of these fields | 
 | 1402 |  * should NOT be depended on after the call to generic_make_request. | 
 | 1403 |  */ | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1404 | static inline void __generic_make_request(struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 | { | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1406 | 	struct request_queue *q; | 
| NeilBrown | 5ddfe96 | 2006-10-30 22:07:21 -0800 | [diff] [blame] | 1407 | 	sector_t old_sector; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | 	int ret, nr_sectors = bio_sectors(bio); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1409 | 	dev_t old_dev; | 
| Jens Axboe | 51fd77b | 2007-11-02 08:49:08 +0100 | [diff] [blame] | 1410 | 	int err = -EIO; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 |  | 
 | 1412 | 	might_sleep(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1413 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1414 | 	if (bio_check_eod(bio, nr_sectors)) | 
 | 1415 | 		goto end_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 |  | 
 | 1417 | 	/* | 
 | 1418 | 	 * Resolve the mapping until finished. (drivers are | 
 | 1419 | 	 * still free to implement/resolve their own stacking | 
 | 1420 | 	 * by explicitly returning 0) | 
 | 1421 | 	 * | 
 | 1422 | 	 * NOTE: we don't repeat the blk_size check for each new device. | 
 | 1423 | 	 * Stacking drivers are expected to know what they are doing. | 
 | 1424 | 	 */ | 
| NeilBrown | 5ddfe96 | 2006-10-30 22:07:21 -0800 | [diff] [blame] | 1425 | 	old_sector = -1; | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1426 | 	old_dev = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | 	do { | 
 | 1428 | 		char b[BDEVNAME_SIZE]; | 
 | 1429 |  | 
 | 1430 | 		q = bdev_get_queue(bio->bi_bdev); | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1431 | 		if (unlikely(!q)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1432 | 			printk(KERN_ERR | 
 | 1433 | 			       "generic_make_request: Trying to access " | 
 | 1434 | 				"nonexistent block-device %s (%Lu)\n", | 
 | 1435 | 				bdevname(bio->bi_bdev, b), | 
 | 1436 | 				(long long) bio->bi_sector); | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1437 | 			goto end_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 | 		} | 
 | 1439 |  | 
| Christoph Hellwig | 67efc92 | 2009-09-30 13:54:20 +0200 | [diff] [blame] | 1440 | 		if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && | 
 | 1441 | 			     nr_sectors > queue_max_hw_sectors(q))) { | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1442 | 			printk(KERN_ERR "bio too big device %s (%u > %u)\n", | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1443 | 			       bdevname(bio->bi_bdev, b), | 
 | 1444 | 			       bio_sectors(bio), | 
 | 1445 | 			       queue_max_hw_sectors(q)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 | 			goto end_io; | 
 | 1447 | 		} | 
 | 1448 |  | 
| Nick Piggin | fde6ad2 | 2005-06-23 00:08:53 -0700 | [diff] [blame] | 1449 | 		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 | 			goto end_io; | 
 | 1451 |  | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1452 | 		if (should_fail_request(bio)) | 
 | 1453 | 			goto end_io; | 
 | 1454 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1455 | 		/* | 
 | 1456 | 		 * If this device has partitions, remap block n | 
 | 1457 | 		 * of partition p to block n+start(p) of the disk. | 
 | 1458 | 		 */ | 
 | 1459 | 		blk_partition_remap(bio); | 
 | 1460 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1461 | 		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) | 
 | 1462 | 			goto end_io; | 
 | 1463 |  | 
| NeilBrown | 5ddfe96 | 2006-10-30 22:07:21 -0800 | [diff] [blame] | 1464 | 		if (old_sector != -1) | 
| Alan D. Brunelle | 22a7c31 | 2009-05-04 16:35:08 -0400 | [diff] [blame] | 1465 | 			trace_block_remap(q, bio, old_dev, old_sector); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1466 |  | 
| NeilBrown | 5ddfe96 | 2006-10-30 22:07:21 -0800 | [diff] [blame] | 1467 | 		old_sector = bio->bi_sector; | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1468 | 		old_dev = bio->bi_bdev->bd_dev; | 
 | 1469 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1470 | 		if (bio_check_eod(bio, nr_sectors)) | 
 | 1471 | 			goto end_io; | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1472 |  | 
| Jens Axboe | 1f98a13 | 2009-09-11 14:32:04 +0200 | [diff] [blame] | 1473 | 		if (bio_rw_flagged(bio, BIO_RW_DISCARD) && | 
| Christoph Hellwig | c15227d | 2009-09-30 13:52:12 +0200 | [diff] [blame] | 1474 | 		    !blk_queue_discard(q)) { | 
| Jens Axboe | 51fd77b | 2007-11-02 08:49:08 +0100 | [diff] [blame] | 1475 | 			err = -EOPNOTSUPP; | 
 | 1476 | 			goto end_io; | 
 | 1477 | 		} | 
| NeilBrown | 5ddfe96 | 2006-10-30 22:07:21 -0800 | [diff] [blame] | 1478 |  | 
| Minchan Kim | 01edede | 2009-09-08 21:56:38 +0200 | [diff] [blame] | 1479 | 		trace_block_bio_queue(q, bio); | 
 | 1480 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | 		ret = q->make_request_fn(q, bio); | 
 | 1482 | 	} while (ret); | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1483 |  | 
 | 1484 | 	return; | 
 | 1485 |  | 
 | 1486 | end_io: | 
 | 1487 | 	bio_endio(bio, err); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | } | 
 | 1489 |  | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1490 | /* | 
 | 1491 |  * We only want one ->make_request_fn to be active at a time, | 
 | 1492 |  * else stack usage with stacked devices could be a problem. | 
 | 1493 |  * So use current->bio_{list,tail} to keep a list of requests | 
 | 1494 |  * submitted by a make_request_fn function. | 
 | 1495 |  * current->bio_tail is also used as a flag to say if | 
 | 1496 |  * generic_make_request is currently active in this task or not. | 
 | 1497 |  * If it is NULL, then no make_request is active.  If it is non-NULL, | 
 | 1498 |  * then a make_request is active, and new requests should be added | 
 | 1499 |  * at the tail | 
 | 1500 |  */ | 
 | 1501 | void generic_make_request(struct bio *bio) | 
 | 1502 | { | 
 | 1503 | 	if (current->bio_tail) { | 
 | 1504 | 		/* make_request is active */ | 
 | 1505 | 		*(current->bio_tail) = bio; | 
 | 1506 | 		bio->bi_next = NULL; | 
 | 1507 | 		current->bio_tail = &bio->bi_next; | 
 | 1508 | 		return; | 
 | 1509 | 	} | 
 | 1510 | 	/* following loop may be a bit non-obvious, and so deserves some | 
 | 1511 | 	 * explanation. | 
 | 1512 | 	 * Before entering the loop, bio->bi_next is NULL (as all callers | 
 | 1513 | 	 * ensure that) so we have a list with a single bio. | 
 | 1514 | 	 * We pretend that we have just taken it off a longer list, so | 
 | 1515 | 	 * we assign bio_list to the next (which is NULL) and bio_tail | 
 | 1516 | 	 * to &bio_list, thus initialising the bio_list of new bios to be | 
 | 1517 | 	 * added.  __generic_make_request may indeed add some more bios | 
 | 1518 | 	 * through a recursive call to generic_make_request.  If it | 
 | 1519 | 	 * did, we find a non-NULL value in bio_list and re-enter the loop | 
 | 1520 | 	 * from the top.  In this case we really did just take the bio | 
 | 1521 | 	 * off the top of the list (no pretending) and so fix up bio_list and | 
 | 1522 | 	 * bio_tail or bi_next, and call into __generic_make_request again. | 
 | 1523 | 	 * | 
 | 1524 | 	 * The loop was structured like this to make only one call to | 
 | 1525 | 	 * __generic_make_request (which is important as it is large and | 
 | 1526 | 	 * inlined) and to keep the structure simple. | 
 | 1527 | 	 */ | 
 | 1528 | 	BUG_ON(bio->bi_next); | 
 | 1529 | 	do { | 
 | 1530 | 		current->bio_list = bio->bi_next; | 
 | 1531 | 		if (bio->bi_next == NULL) | 
 | 1532 | 			current->bio_tail = &current->bio_list; | 
 | 1533 | 		else | 
 | 1534 | 			bio->bi_next = NULL; | 
 | 1535 | 		__generic_make_request(bio); | 
 | 1536 | 		bio = current->bio_list; | 
 | 1537 | 	} while (bio); | 
 | 1538 | 	current->bio_tail = NULL; /* deactivate */ | 
 | 1539 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | EXPORT_SYMBOL(generic_make_request); | 
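
/*
 * A minimal illustrative sketch (not part of this file): the fields a
 * caller is expected to fill in before handing a bio to
 * generic_make_request().  The page and completion handler are assumed
 * to be supplied by the caller; error checking is omitted.
 */
static void example_read_one_page(struct block_device *bdev, sector_t sector,
				  struct page *page, bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;			/* device address */
	bio->bi_sector = sector;		/* location on the device */
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* memory to read into */
	bio->bi_end_io = end_io;		/* completion notification */
	bio->bi_rw = READ;			/* direction (READ is 0) */

	generic_make_request(bio);		/* status arrives via end_io */
}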
 | 1541 |  | 
 | 1542 | /** | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1543 |  * submit_bio - submit a bio to the block device layer for I/O | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1544 |  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | 
 | 1545 |  * @bio: The &struct bio which describes the I/O | 
 | 1546 |  * | 
 | 1547 |  * submit_bio() is very similar in purpose to generic_make_request(), and | 
 | 1548 |  * uses that function to do most of the work. Both are fairly rough | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1549 |  * interfaces; @bio must be set up and ready for I/O. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 |  * | 
 | 1551 |  */ | 
 | 1552 | void submit_bio(int rw, struct bio *bio) | 
 | 1553 | { | 
 | 1554 | 	int count = bio_sectors(bio); | 
 | 1555 |  | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 1556 | 	bio->bi_rw |= rw; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1558 | 	/* | 
 | 1559 | 	 * If it's a regular read/write or a barrier with data attached, | 
 | 1560 | 	 * go through the normal accounting stuff before submission. | 
 | 1561 | 	 */ | 
| Jens Axboe | a9c701e | 2008-08-08 11:04:44 +0200 | [diff] [blame] | 1562 | 	if (bio_has_data(bio)) { | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1563 | 		if (rw & WRITE) { | 
 | 1564 | 			count_vm_events(PGPGOUT, count); | 
 | 1565 | 		} else { | 
 | 1566 | 			task_io_account_read(bio->bi_size); | 
 | 1567 | 			count_vm_events(PGPGIN, count); | 
 | 1568 | 		} | 
 | 1569 |  | 
 | 1570 | 		if (unlikely(block_dump)) { | 
 | 1571 | 			char b[BDEVNAME_SIZE]; | 
 | 1572 | 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 1573 | 			current->comm, task_pid_nr(current), | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1574 | 				(rw & WRITE) ? "WRITE" : "READ", | 
 | 1575 | 				(unsigned long long)bio->bi_sector, | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1576 | 				bdevname(bio->bi_bdev, b)); | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1577 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | 	} | 
 | 1579 |  | 
 | 1580 | 	generic_make_request(bio); | 
 | 1581 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | EXPORT_SYMBOL(submit_bio); | 
 | 1583 |  | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1584 | /** | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1585 |  * blk_rq_check_limits - Helper function to check a request for the queue limit | 
 | 1586 |  * @q:  the queue | 
 | 1587 |  * @rq: the request being checked | 
 | 1588 |  * | 
 | 1589 |  * Description: | 
 | 1590 |  *    @rq may have been made based on weaker limitations of upper-level queues | 
 | 1591 |  *    in request stacking drivers, and it may violate the limitation of @q. | 
 | 1592 |  *    Since the block layer and the underlying device driver trust @rq | 
 | 1593 |  *    after it is inserted to @q, it should be checked against @q before | 
 | 1594 |  *    the insertion using this generic function. | 
 | 1595 |  * | 
 | 1596 |  *    This function should also be useful for request stacking drivers | 
 | 1597 |  *    in some cases below, so export this function. | 
 | 1598 |  *    Request stacking drivers like request-based dm may change the queue | 
 | 1599 |  *    limits while requests are in the queue (e.g. dm's table swapping). | 
 | 1600 |  *    Such request stacking drivers should check those requests against | 
 | 1601 |  *    the new queue limits again when they dispatch those requests, | 
 | 1602 |  *    although such checks are also done against the old queue limits | 
 | 1603 |  *    when submitting requests. | 
 | 1604 |  */ | 
 | 1605 | int blk_rq_check_limits(struct request_queue *q, struct request *rq) | 
 | 1606 | { | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1607 | 	if (blk_rq_sectors(rq) > queue_max_sectors(q) || | 
 | 1608 | 	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1609 | 		printk(KERN_ERR "%s: over max size limit.\n", __func__); | 
 | 1610 | 		return -EIO; | 
 | 1611 | 	} | 
 | 1612 |  | 
 | 1613 | 	/* | 
 | 1614 | 	 * The queue's settings related to segment counting, like q->bounce_pfn, | 
 | 1615 | 	 * may differ from those of other stacking queues. | 
 | 1616 | 	 * Recalculate them to check the request correctly against this queue's | 
 | 1617 | 	 * limits. | 
 | 1618 | 	 */ | 
 | 1619 | 	blk_recalc_rq_segments(rq); | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1620 | 	if (rq->nr_phys_segments > queue_max_phys_segments(q) || | 
 | 1621 | 	    rq->nr_phys_segments > queue_max_hw_segments(q)) { | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1622 | 		printk(KERN_ERR "%s: over max segments limit.\n", __func__); | 
 | 1623 | 		return -EIO; | 
 | 1624 | 	} | 
 | 1625 |  | 
 | 1626 | 	return 0; | 
 | 1627 | } | 
 | 1628 | EXPORT_SYMBOL_GPL(blk_rq_check_limits); | 
 | 1629 |  | 
 | 1630 | /** | 
 | 1631 |  * blk_insert_cloned_request - Helper for stacking drivers to submit a request | 
 | 1632 |  * @q:  the queue to submit the request | 
 | 1633 |  * @rq: the request being queued | 
 | 1634 |  */ | 
 | 1635 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 
 | 1636 | { | 
 | 1637 | 	unsigned long flags; | 
 | 1638 |  | 
 | 1639 | 	if (blk_rq_check_limits(q, rq)) | 
 | 1640 | 		return -EIO; | 
 | 1641 |  | 
 | 1642 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 
 | 1643 | 	if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && | 
 | 1644 | 	    should_fail(&fail_make_request, blk_rq_bytes(rq))) | 
 | 1645 | 		return -EIO; | 
 | 1646 | #endif | 
 | 1647 |  | 
 | 1648 | 	spin_lock_irqsave(q->queue_lock, flags); | 
 | 1649 |  | 
 | 1650 | 	/* | 
 | 1651 | 	 * The request being submitted must be dequeued before calling this function | 
 | 1652 | 	 * because it will be linked to another request_queue | 
 | 1653 | 	 */ | 
 | 1654 | 	BUG_ON(blk_queued_rq(rq)); | 
 | 1655 |  | 
 | 1656 | 	drive_stat_acct(rq, 1); | 
 | 1657 | 	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); | 
 | 1658 |  | 
 | 1659 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 1660 |  | 
 | 1661 | 	return 0; | 
 | 1662 | } | 
 | 1663 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 
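
/*
 * A minimal illustrative sketch (not part of this file): how a request-based
 * stacking driver might hand an already-prepared clone to the queue of the
 * underlying device, failing the original request if the dispatch is
 * rejected.  A real driver would also free the clone on the error path.
 */
static void example_dispatch_clone(struct request_queue *lower_q,
				   struct request *clone,
				   struct request *original)
{
	/*
	 * blk_insert_cloned_request() re-checks the clone against lower_q's
	 * limits, takes lower_q->queue_lock and queues the clone at the
	 * back of the dispatch queue.
	 */
	if (blk_insert_cloned_request(lower_q, clone))
		blk_end_request_all(original, -EIO);
}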
 | 1664 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1665 | /** | 
 | 1666 |  * blk_rq_err_bytes - determine number of bytes till the next failure boundary | 
 | 1667 |  * @rq: request to examine | 
 | 1668 |  * | 
 | 1669 |  * Description: | 
 | 1670 |  *     A request could be a merge of IOs which require different failure | 
 | 1671 |  *     handling.  This function determines the number of bytes which | 
 | 1672 |  *     can be failed from the beginning of the request without | 
 | 1673 |  *     crossing into an area which needs to be retried further. | 
 | 1674 |  * | 
 | 1675 |  * Return: | 
 | 1676 |  *     The number of bytes to fail. | 
 | 1677 |  * | 
 | 1678 |  * Context: | 
 | 1679 |  *     queue_lock must be held. | 
 | 1680 |  */ | 
 | 1681 | unsigned int blk_rq_err_bytes(const struct request *rq) | 
 | 1682 | { | 
 | 1683 | 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; | 
 | 1684 | 	unsigned int bytes = 0; | 
 | 1685 | 	struct bio *bio; | 
 | 1686 |  | 
 | 1687 | 	if (!(rq->cmd_flags & REQ_MIXED_MERGE)) | 
 | 1688 | 		return blk_rq_bytes(rq); | 
 | 1689 |  | 
 | 1690 | 	/* | 
 | 1691 | 	 * Currently the only 'mixing' which can happen is between | 
 | 1692 | 	 * different failfast types.  We can safely fail portions | 
 | 1693 | 	 * which have all the failfast bits that the first one has - | 
 | 1694 | 	 * the ones which are at least as eager to fail as the first | 
 | 1695 | 	 * one. | 
 | 1696 | 	 */ | 
 | 1697 | 	for (bio = rq->bio; bio; bio = bio->bi_next) { | 
 | 1698 | 		if ((bio->bi_rw & ff) != ff) | 
 | 1699 | 			break; | 
 | 1700 | 		bytes += bio->bi_size; | 
 | 1701 | 	} | 
 | 1702 |  | 
 | 1703 | 	/* this could lead to infinite loop */ | 
 | 1704 | 	BUG_ON(blk_rq_bytes(rq) && !bytes); | 
 | 1705 | 	return bytes; | 
 | 1706 | } | 
 | 1707 | EXPORT_SYMBOL_GPL(blk_rq_err_bytes); | 
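
/*
 * A minimal illustrative sketch (not part of this file): failing only the
 * leading part of a mixed-merge request that shares the first bio's
 * failfast policy, leaving the remainder queued for retry.  Must be called
 * with the queue lock held, as blk_rq_err_bytes() requires.
 */
static bool example_fail_failfast_portion(struct request *rq)
{
	unsigned int nr_bytes = blk_rq_err_bytes(rq);

	/*
	 * Fail the leading nr_bytes; __blk_end_request() returns true
	 * if a retryable tail of the request is still pending.
	 */
	return __blk_end_request(rq, -EIO, nr_bytes);
}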
 | 1708 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1709 | static void blk_account_io_completion(struct request *req, unsigned int bytes) | 
 | 1710 | { | 
| Jens Axboe | c2553b5 | 2009-04-24 08:10:11 +0200 | [diff] [blame] | 1711 | 	if (blk_do_io_stat(req)) { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1712 | 		const int rw = rq_data_dir(req); | 
 | 1713 | 		struct hd_struct *part; | 
 | 1714 | 		int cpu; | 
 | 1715 |  | 
 | 1716 | 		cpu = part_stat_lock(); | 
| Tejun Heo | 83096eb | 2009-05-07 22:24:39 +0900 | [diff] [blame] | 1717 | 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1718 | 		part_stat_add(cpu, part, sectors[rw], bytes >> 9); | 
 | 1719 | 		part_stat_unlock(); | 
 | 1720 | 	} | 
 | 1721 | } | 
 | 1722 |  | 
 | 1723 | static void blk_account_io_done(struct request *req) | 
 | 1724 | { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1725 | 	/* | 
 | 1726 | 	 * Account IO completion.  bar_rq isn't accounted as a normal | 
 | 1727 | 	 * IO on either queueing or completion.  Accounting the containing | 
 | 1728 | 	 * request is enough. | 
 | 1729 | 	 */ | 
| Jens Axboe | c2553b5 | 2009-04-24 08:10:11 +0200 | [diff] [blame] | 1730 | 	if (blk_do_io_stat(req) && req != &req->q->bar_rq) { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1731 | 		unsigned long duration = jiffies - req->start_time; | 
 | 1732 | 		const int rw = rq_data_dir(req); | 
 | 1733 | 		struct hd_struct *part; | 
 | 1734 | 		int cpu; | 
 | 1735 |  | 
 | 1736 | 		cpu = part_stat_lock(); | 
| Tejun Heo | 83096eb | 2009-05-07 22:24:39 +0900 | [diff] [blame] | 1737 | 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1738 |  | 
 | 1739 | 		part_stat_inc(cpu, part, ios[rw]); | 
 | 1740 | 		part_stat_add(cpu, part, ticks[rw], duration); | 
 | 1741 | 		part_round_stats(cpu, part); | 
| Jens Axboe | 0f78ab9 | 2009-10-04 21:04:38 +0200 | [diff] [blame] | 1742 | 		part_dec_in_flight(part); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1743 |  | 
 | 1744 | 		part_stat_unlock(); | 
 | 1745 | 	} | 
 | 1746 | } | 
 | 1747 |  | 
| Tejun Heo | 53a0880 | 2008-12-03 12:41:26 +0100 | [diff] [blame] | 1748 | /** | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1749 |  * blk_peek_request - peek at the top of a request queue | 
 | 1750 |  * @q: request queue to peek at | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1751 |  * | 
 | 1752 |  * Description: | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1753 |  *     Return the request at the top of @q.  The returned request | 
 | 1754 |  *     should be started using blk_start_request() before the LLD starts | 
 | 1755 |  *     processing it. | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1756 |  * | 
 | 1757 |  * Return: | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1758 |  *     Pointer to the request at the top of @q if available.  Null | 
 | 1759 |  *     otherwise. | 
 | 1760 |  * | 
 | 1761 |  * Context: | 
 | 1762 |  *     queue_lock must be held. | 
 | 1763 |  */ | 
 | 1764 | struct request *blk_peek_request(struct request_queue *q) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1765 | { | 
 | 1766 | 	struct request *rq; | 
 | 1767 | 	int ret; | 
 | 1768 |  | 
 | 1769 | 	while ((rq = __elv_next_request(q)) != NULL) { | 
 | 1770 | 		if (!(rq->cmd_flags & REQ_STARTED)) { | 
 | 1771 | 			/* | 
 | 1772 | 			 * This is the first time the device driver | 
 | 1773 | 			 * sees this request (possibly after | 
 | 1774 | 			 * requeueing).  Notify IO scheduler. | 
 | 1775 | 			 */ | 
 | 1776 | 			if (blk_sorted_rq(rq)) | 
 | 1777 | 				elv_activate_rq(q, rq); | 
 | 1778 |  | 
 | 1779 | 			/* | 
 | 1780 | 			 * Just mark it as started even if we don't start | 
 | 1781 | 			 * it: a request that has been delayed should | 
 | 1782 | 			 * not be passed by new incoming requests. | 
 | 1783 | 			 */ | 
 | 1784 | 			rq->cmd_flags |= REQ_STARTED; | 
 | 1785 | 			trace_block_rq_issue(q, rq); | 
 | 1786 | 		} | 
 | 1787 |  | 
 | 1788 | 		if (!q->boundary_rq || q->boundary_rq == rq) { | 
 | 1789 | 			q->end_sector = rq_end_sector(rq); | 
 | 1790 | 			q->boundary_rq = NULL; | 
 | 1791 | 		} | 
 | 1792 |  | 
 | 1793 | 		if (rq->cmd_flags & REQ_DONTPREP) | 
 | 1794 | 			break; | 
 | 1795 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 1796 | 		if (q->dma_drain_size && blk_rq_bytes(rq)) { | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1797 | 			/* | 
 | 1798 | 			 * Make sure space for the drain appears.  We | 
 | 1799 | 			 * know we can do this because max_hw_segments | 
 | 1800 | 			 * has been adjusted to be one fewer than the | 
 | 1801 | 			 * device can handle. | 
 | 1802 | 			 */ | 
 | 1803 | 			rq->nr_phys_segments++; | 
 | 1804 | 		} | 
 | 1805 |  | 
 | 1806 | 		if (!q->prep_rq_fn) | 
 | 1807 | 			break; | 
 | 1808 |  | 
 | 1809 | 		ret = q->prep_rq_fn(q, rq); | 
 | 1810 | 		if (ret == BLKPREP_OK) { | 
 | 1811 | 			break; | 
 | 1812 | 		} else if (ret == BLKPREP_DEFER) { | 
 | 1813 | 			/* | 
 | 1814 | 			 * The request may have been (partially) prepped. | 
 | 1815 | 			 * We need to keep this request at the front to | 
 | 1816 | 			 * avoid resource deadlock.  REQ_STARTED will | 
 | 1817 | 			 * prevent other fs requests from passing this one. | 
 | 1818 | 			 */ | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 1819 | 			if (q->dma_drain_size && blk_rq_bytes(rq) && | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1820 | 			    !(rq->cmd_flags & REQ_DONTPREP)) { | 
 | 1821 | 				/* | 
 | 1822 | 				 * remove the space for the drain we added | 
 | 1823 | 				 * so that we don't add it again | 
 | 1824 | 				 */ | 
 | 1825 | 				--rq->nr_phys_segments; | 
 | 1826 | 			} | 
 | 1827 |  | 
 | 1828 | 			rq = NULL; | 
 | 1829 | 			break; | 
 | 1830 | 		} else if (ret == BLKPREP_KILL) { | 
 | 1831 | 			rq->cmd_flags |= REQ_QUIET; | 
| James Bottomley | c143dc9 | 2009-05-30 06:43:49 +0200 | [diff] [blame] | 1832 | 			/* | 
 | 1833 | 			 * Mark this request as started so we don't trigger | 
 | 1834 | 			 * any debug logic in the end I/O path. | 
 | 1835 | 			 */ | 
 | 1836 | 			blk_start_request(rq); | 
| Tejun Heo | 40cbbb7 | 2009-04-23 11:05:19 +0900 | [diff] [blame] | 1837 | 			__blk_end_request_all(rq, -EIO); | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1838 | 		} else { | 
 | 1839 | 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); | 
 | 1840 | 			break; | 
 | 1841 | 		} | 
 | 1842 | 	} | 
 | 1843 |  | 
 | 1844 | 	return rq; | 
 | 1845 | } | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1846 | EXPORT_SYMBOL(blk_peek_request); | 
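
/*
 * Illustrative sketch, not part of the original file: a request_fn
 * that peeks first, so a request can stay at the head of the queue
 * while the hardware is busy.  "my_hw_busy" and "my_issue" are
 * hypothetical driver hooks stubbed out here.
 */
static int my_hw_busy(struct request_queue *q)
{
	return 0;			/* stub: hardware never busy */
}

static void my_issue(struct request *rq)
{
	/* stub: kick the hardware with @rq */
}

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (my_hw_busy(q))
			break;		/* leave rq at the head of @q */

		blk_start_request(rq);	/* dequeue and arm the timeout */
		my_issue(rq);
	}
}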
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1847 |  | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1848 | void blk_dequeue_request(struct request *rq) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1849 | { | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1850 | 	struct request_queue *q = rq->q; | 
 | 1851 |  | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1852 | 	BUG_ON(list_empty(&rq->queuelist)); | 
 | 1853 | 	BUG_ON(ELV_ON_HASH(rq)); | 
 | 1854 |  | 
 | 1855 | 	list_del_init(&rq->queuelist); | 
 | 1856 |  | 
 | 1857 | 	/* | 
 | 1858 | 	 * The time frame between a request being removed from the lists | 
 | 1859 | 	 * and it being freed is accounted as IO that is in progress at | 
 | 1860 | 	 * the driver side. | 
 | 1861 | 	 */ | 
| Jens Axboe | fb1e753 | 2009-07-30 08:18:24 +0200 | [diff] [blame] | 1862 | 	if (blk_account_rq(rq)) { | 
| Jens Axboe | 0a7ae2f | 2009-05-20 08:54:31 +0200 | [diff] [blame] | 1863 | 		q->in_flight[rq_is_sync(rq)]++; | 
| Jens Axboe | fb1e753 | 2009-07-30 08:18:24 +0200 | [diff] [blame] | 1864 | 		/* | 
 | 1865 | 		 * Mark this device as supporting hardware queuing, if | 
 | 1866 | 		 * we have more than 4 IOs in flight. | 
 | 1867 | 		 */ | 
 | 1868 | 		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4) | 
 | 1869 | 			set_bit(QUEUE_FLAG_CQ, &q->queue_flags); | 
 | 1870 | 	} | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1871 | } | 
 | 1872 |  | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1873 | /** | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1874 |  * blk_start_request - start request processing on the driver | 
 | 1875 |  * @req: request to dequeue | 
 | 1876 |  * | 
 | 1877 |  * Description: | 
 | 1878 |  *     Dequeue @req and start the timeout timer on it.  This hands off the | 
 | 1879 |  *     request to the driver. | 
 | 1880 |  * | 
 | 1881 |  *     Block internal functions which don't want to start the timer should | 
 | 1882 |  *     call blk_dequeue_request(). | 
 | 1883 |  * | 
 | 1884 |  * Context: | 
 | 1885 |  *     queue_lock must be held. | 
 | 1886 |  */ | 
 | 1887 | void blk_start_request(struct request *req) | 
 | 1888 | { | 
 | 1889 | 	blk_dequeue_request(req); | 
 | 1890 |  | 
 | 1891 | 	/* | 
| Tejun Heo | 5f49f63 | 2009-05-19 18:33:05 +0900 | [diff] [blame] | 1892 | 	 * We are now handing the request to the hardware; initialize | 
 | 1893 | 	 * resid_len to full count and add the timeout handler. | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1894 | 	 */ | 
| Tejun Heo | 5f49f63 | 2009-05-19 18:33:05 +0900 | [diff] [blame] | 1895 | 	req->resid_len = blk_rq_bytes(req); | 
| FUJITA Tomonori | dbb66c4 | 2009-06-09 05:47:10 +0200 | [diff] [blame] | 1896 | 	if (unlikely(blk_bidi_rq(req))) | 
 | 1897 | 		req->next_rq->resid_len = blk_rq_bytes(req->next_rq); | 
 | 1898 |  | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1899 | 	blk_add_timer(req); | 
 | 1900 | } | 
 | 1901 | EXPORT_SYMBOL(blk_start_request); | 
 | 1902 |  | 
 | 1903 | /** | 
 | 1904 |  * blk_fetch_request - fetch a request from a request queue | 
 | 1905 |  * @q: request queue to fetch a request from | 
 | 1906 |  * | 
 | 1907 |  * Description: | 
 | 1908 |  *     Return the request at the top of @q.  The request is started on | 
 | 1909 |  *     return and the LLD can start processing it immediately. | 
 | 1910 |  * | 
 | 1911 |  * Return: | 
 | 1912 |  *     Pointer to the request at the top of @q if available.  Null | 
 | 1913 |  *     otherwise. | 
 | 1914 |  * | 
 | 1915 |  * Context: | 
 | 1916 |  *     queue_lock must be held. | 
 | 1917 |  */ | 
 | 1918 | struct request *blk_fetch_request(struct request_queue *q) | 
 | 1919 | { | 
 | 1920 | 	struct request *rq; | 
 | 1921 |  | 
 | 1922 | 	rq = blk_peek_request(q); | 
 | 1923 | 	if (rq) | 
 | 1924 | 		blk_start_request(rq); | 
 | 1925 | 	return rq; | 
 | 1926 | } | 
 | 1927 | EXPORT_SYMBOL(blk_fetch_request); | 
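
/*
 * Illustrative sketch, not part of the original file: the minimal
 * request_fn for hardware that completes transfers synchronously.
 * "my_do_transfer" is a hypothetical helper returning 0 or a
 * negative errno.
 */
static int my_do_transfer(struct request *rq)
{
	return 0;		/* stub: move blk_rq_bytes(rq) bytes */
}

static void my_sync_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		int error = my_do_transfer(rq);

		/* request_fn runs with the queue lock held */
		__blk_end_request_all(rq, error);
	}
}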
 | 1928 |  | 
 | 1929 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1930 |  * blk_update_request - Special helper function for request stacking drivers | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1931 |  * @req:      the request being processed | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1932 |  * @error:    %0 for success, < %0 for error | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1933 |  * @nr_bytes: number of bytes to complete @req | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1934 |  * | 
 | 1935 |  * Description: | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1936 |  *     Ends I/O on a number of bytes attached to @req, but doesn't complete | 
 | 1937 |  *     the request structure even if @req has no leftover. | 
 | 1938 |  *     If @req has leftover, sets it up for the next range of segments. | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1939 |  * | 
 | 1940 |  *     This special helper function is only for request stacking drivers | 
 | 1941 |  *     (e.g. request-based dm) so that they can handle partial completion. | 
 | 1942 |  *     Actual device drivers should use blk_end_request instead. | 
 | 1943 |  * | 
 | 1944 |  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees | 
 | 1945 |  *     %false return from this function. | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1946 |  * | 
 | 1947 |  * Return: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1948 |  *     %false - this request doesn't have any more data | 
 | 1949 |  *     %true  - this request has more data | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1950 |  **/ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1951 | bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1952 | { | 
| Kiyoshi Ueda | 5450d3e | 2007-12-11 17:53:03 -0500 | [diff] [blame] | 1953 | 	int total_bytes, bio_nbytes, next_idx = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 | 	struct bio *bio; | 
 | 1955 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 1956 | 	if (!req->bio) | 
 | 1957 | 		return false; | 
 | 1958 |  | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 1959 | 	trace_block_rq_complete(req->q, req); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1960 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1961 | 	/* | 
| Tejun Heo | 6f41469 | 2009-04-19 07:00:41 +0900 | [diff] [blame] | 1962 | 	 * For fs requests, rq is just a carrier of independent bios | 
 | 1963 | 	 * and each partial completion should be handled separately. | 
 | 1964 | 	 * Reset per-request error on each partial completion. | 
 | 1965 | 	 * | 
 | 1966 | 	 * TODO: tj: This is too subtle.  It would be better to let | 
 | 1967 | 	 * low level drivers do what they see fit. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1968 | 	 */ | 
| Tejun Heo | 6f41469 | 2009-04-19 07:00:41 +0900 | [diff] [blame] | 1969 | 	if (blk_fs_request(req)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1970 | 		req->errors = 0; | 
 | 1971 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1972 | 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { | 
 | 1973 | 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1974 | 				req->rq_disk ? req->rq_disk->disk_name : "?", | 
| Tejun Heo | 83096eb | 2009-05-07 22:24:39 +0900 | [diff] [blame] | 1975 | 				(unsigned long long)blk_rq_pos(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 | 	} | 
 | 1977 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1978 | 	blk_account_io_completion(req, nr_bytes); | 
| Jens Axboe | d72d904 | 2005-11-01 08:35:42 +0100 | [diff] [blame] | 1979 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1980 | 	total_bytes = bio_nbytes = 0; | 
 | 1981 | 	while ((bio = req->bio) != NULL) { | 
 | 1982 | 		int nbytes; | 
 | 1983 |  | 
 | 1984 | 		if (nr_bytes >= bio->bi_size) { | 
 | 1985 | 			req->bio = bio->bi_next; | 
 | 1986 | 			nbytes = bio->bi_size; | 
| NeilBrown | 5bb23a6 | 2007-09-27 12:46:13 +0200 | [diff] [blame] | 1987 | 			req_bio_endio(req, bio, nbytes, error); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1988 | 			next_idx = 0; | 
 | 1989 | 			bio_nbytes = 0; | 
 | 1990 | 		} else { | 
 | 1991 | 			int idx = bio->bi_idx + next_idx; | 
 | 1992 |  | 
| Kazuhisa Ichikawa | af498d7 | 2009-05-12 13:27:45 +0200 | [diff] [blame] | 1993 | 			if (unlikely(idx >= bio->bi_vcnt)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1994 | 				blk_dump_rq_flags(req, "__end_that"); | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 1995 | 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", | 
| Kazuhisa Ichikawa | af498d7 | 2009-05-12 13:27:45 +0200 | [diff] [blame] | 1996 | 				       __func__, idx, bio->bi_vcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | 				break; | 
 | 1998 | 			} | 
 | 1999 |  | 
 | 2000 | 			nbytes = bio_iovec_idx(bio, idx)->bv_len; | 
 | 2001 | 			BIO_BUG_ON(nbytes > bio->bi_size); | 
 | 2002 |  | 
 | 2003 | 			/* | 
 | 2004 | 			 * this bvec was not completed in full | 
 | 2005 | 			 */ | 
 | 2006 | 			if (unlikely(nbytes > nr_bytes)) { | 
 | 2007 | 				bio_nbytes += nr_bytes; | 
 | 2008 | 				total_bytes += nr_bytes; | 
 | 2009 | 				break; | 
 | 2010 | 			} | 
 | 2011 |  | 
 | 2012 | 			/* | 
 | 2013 | 			 * advance to the next vector | 
 | 2014 | 			 */ | 
 | 2015 | 			next_idx++; | 
 | 2016 | 			bio_nbytes += nbytes; | 
 | 2017 | 		} | 
 | 2018 |  | 
 | 2019 | 		total_bytes += nbytes; | 
 | 2020 | 		nr_bytes -= nbytes; | 
 | 2021 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 2022 | 		bio = req->bio; | 
 | 2023 | 		if (bio) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2024 | 			/* | 
 | 2025 | 			 * end more in this run, or just return 'not-done' | 
 | 2026 | 			 */ | 
 | 2027 | 			if (unlikely(nr_bytes <= 0)) | 
 | 2028 | 				break; | 
 | 2029 | 		} | 
 | 2030 | 	} | 
 | 2031 |  | 
 | 2032 | 	/* | 
 | 2033 | 	 * completely done | 
 | 2034 | 	 */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2035 | 	if (!req->bio) { | 
 | 2036 | 		/* | 
 | 2037 | 		 * Reset counters so that the request stacking driver | 
 | 2038 | 		 * can find how many bytes remain in the request | 
 | 2039 | 		 * later. | 
 | 2040 | 		 */ | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2041 | 		req->__data_len = 0; | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2042 | 		return false; | 
 | 2043 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2044 |  | 
 | 2045 | 	/* | 
 | 2046 | 	 * if the request wasn't completed, update state | 
 | 2047 | 	 */ | 
 | 2048 | 	if (bio_nbytes) { | 
| NeilBrown | 5bb23a6 | 2007-09-27 12:46:13 +0200 | [diff] [blame] | 2049 | 		req_bio_endio(req, bio, bio_nbytes, error); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2050 | 		bio->bi_idx += next_idx; | 
 | 2051 | 		bio_iovec(bio)->bv_offset += nr_bytes; | 
 | 2052 | 		bio_iovec(bio)->bv_len -= nr_bytes; | 
 | 2053 | 	} | 
 | 2054 |  | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2055 | 	req->__data_len -= total_bytes; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2056 | 	req->buffer = bio_data(req->bio); | 
 | 2057 |  | 
 | 2058 | 	/* update sector only for requests with a clear definition of sector */ | 
 | 2059 | 	if (blk_fs_request(req) || blk_discard_rq(req)) | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2060 | 		req->__sector += total_bytes >> 9; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2061 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2062 | 	/* mixed attributes always follow the first bio */ | 
 | 2063 | 	if (req->cmd_flags & REQ_MIXED_MERGE) { | 
 | 2064 | 		req->cmd_flags &= ~REQ_FAILFAST_MASK; | 
 | 2065 | 		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; | 
 | 2066 | 	} | 
 | 2067 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2068 | 	/* | 
 | 2069 | 	 * If the total number of bytes is less than the first segment | 
 | 2070 | 	 * size, something has gone terribly wrong. | 
 | 2071 | 	 */ | 
 | 2072 | 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { | 
 | 2073 | 		printk(KERN_ERR "blk: request botched\n"); | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2074 | 		req->__data_len = blk_rq_cur_bytes(req); | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2075 | 	} | 
 | 2076 |  | 
 | 2077 | 	/* recalculate the number of segments */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | 	blk_recalc_rq_segments(req); | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2079 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2080 | 	return true; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2081 | } | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2082 | EXPORT_SYMBOL_GPL(blk_update_request); | 
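
/*
 * Illustrative sketch, not part of the original file: a stacking
 * driver advancing the original request as each cloned bio completes,
 * the partial-completion case blk_update_request() exists to serve.
 * "my_clone_bio_done" is a hypothetical completion hook.
 */
static void my_clone_bio_done(struct request *orig, unsigned int nr_bytes,
			      int error)
{
	if (!blk_update_request(orig, error, nr_bytes)) {
		/*
		 * @orig has no data left; finish it through one of the
		 * blk_end_request variants once the clone is done.
		 */
	}
}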
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2084 | static bool blk_update_bidi_request(struct request *rq, int error, | 
 | 2085 | 				    unsigned int nr_bytes, | 
 | 2086 | 				    unsigned int bidi_bytes) | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2087 | { | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2088 | 	if (blk_update_request(rq, error, nr_bytes)) | 
 | 2089 | 		return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2090 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2091 | 	/* Bidi request must be completed as a whole */ | 
 | 2092 | 	if (unlikely(blk_bidi_rq(rq)) && | 
 | 2093 | 	    blk_update_request(rq->next_rq, error, bidi_bytes)) | 
 | 2094 | 		return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2095 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2096 | 	add_disk_randomness(rq->rq_disk); | 
 | 2097 |  | 
 | 2098 | 	return false; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 | } | 
 | 2100 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2101 | /* | 
 | 2102 |  * queue lock must be held | 
 | 2103 |  */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2104 | static void blk_finish_request(struct request *req, int error) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2105 | { | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2106 | 	if (blk_rq_tagged(req)) | 
 | 2107 | 		blk_queue_end_tag(req->q, req); | 
 | 2108 |  | 
| James Bottomley | ba396a6 | 2009-05-27 14:17:08 +0200 | [diff] [blame] | 2109 | 	BUG_ON(blk_queued_rq(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2110 |  | 
 | 2111 | 	if (unlikely(laptop_mode) && blk_fs_request(req)) | 
 | 2112 | 		laptop_io_completion(); | 
 | 2113 |  | 
| Mike Anderson | e78042e | 2008-10-30 02:16:20 -0700 | [diff] [blame] | 2114 | 	blk_delete_timer(req); | 
 | 2115 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2116 | 	blk_account_io_done(req); | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2117 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2118 | 	if (req->end_io) | 
| Tejun Heo | 8ffdc65 | 2006-01-06 09:49:03 +0100 | [diff] [blame] | 2119 | 		req->end_io(req, error); | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2120 | 	else { | 
 | 2121 | 		if (blk_bidi_rq(req)) | 
 | 2122 | 			__blk_put_request(req->next_rq->q, req->next_rq); | 
 | 2123 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2124 | 		__blk_put_request(req->q, req); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2125 | 	} | 
 | 2126 | } | 
 | 2127 |  | 
| Kiyoshi Ueda | 3b11313 | 2007-12-11 17:41:17 -0500 | [diff] [blame] | 2128 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2129 |  * blk_end_bidi_request - Complete a bidi request | 
 | 2130 |  * @rq:         the request to complete | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 2131 |  * @error:      %0 for success, < %0 for error | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2132 |  * @nr_bytes:   number of bytes to complete @rq | 
 | 2133 |  * @bidi_bytes: number of bytes to complete @rq->next_rq | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2134 |  * | 
 | 2135 |  * Description: | 
 | 2136 |  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2137 |  *     Drivers that support bidi can safely call this function for any | 
 | 2138 |  *     type of request, bidi or uni.  In the latter case @bidi_bytes is | 
 | 2139 |  *     just ignored. | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2140 |  * | 
 | 2141 |  * Return: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2142 |  *     %false - we are done with this request | 
 | 2143 |  *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2144 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2145 | static bool blk_end_bidi_request(struct request *rq, int error, | 
 | 2146 | 				 unsigned int nr_bytes, unsigned int bidi_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2147 | { | 
 | 2148 | 	struct request_queue *q = rq->q; | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2149 | 	unsigned long flags; | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2150 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2151 | 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 
 | 2152 | 		return true; | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2153 |  | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2154 | 	spin_lock_irqsave(q->queue_lock, flags); | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2155 | 	blk_finish_request(rq, error); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2156 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 2157 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2158 | 	return false; | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2159 | } | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2160 |  | 
 | 2161 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2162 |  * __blk_end_bidi_request - Complete a bidi request with queue lock held | 
 | 2163 |  * @rq:         the request to complete | 
 | 2164 |  * @error:      %0 for success, < %0 for error | 
 | 2165 |  * @nr_bytes:   number of bytes to complete @rq | 
 | 2166 |  * @bidi_bytes: number of bytes to complete @rq->next_rq | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2167 |  * | 
 | 2168 |  * Description: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2169 |  *     Identical to blk_end_bidi_request() except that the queue lock is | 
 | 2170 |  *     assumed to be held on entry and remains so on return. | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2171 |  * | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2172 |  * Return: | 
 | 2173 |  *     %false - we are done with this request | 
 | 2174 |  *     %true  - still buffers pending for this request | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2175 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2176 | static bool __blk_end_bidi_request(struct request *rq, int error, | 
 | 2177 | 				   unsigned int nr_bytes, unsigned int bidi_bytes) | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2178 | { | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2179 | 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 
 | 2180 | 		return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2181 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2182 | 	blk_finish_request(rq, error); | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2183 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2184 | 	return false; | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2185 | } | 
 | 2186 |  | 
 | 2187 | /** | 
 | 2188 |  * blk_end_request - Helper function for drivers to complete the request. | 
 | 2189 |  * @rq:       the request being processed | 
 | 2190 |  * @error:    %0 for success, < %0 for error | 
 | 2191 |  * @nr_bytes: number of bytes to complete | 
 | 2192 |  * | 
 | 2193 |  * Description: | 
 | 2194 |  *     Ends I/O on a number of bytes attached to @rq. | 
 | 2195 |  *     If @rq has leftover, sets it up for the next range of segments. | 
 | 2196 |  * | 
 | 2197 |  * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2198 |  *     %false - we are done with this request | 
 | 2199 |  *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2200 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2201 | bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2202 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2203 | 	return blk_end_bidi_request(rq, error, nr_bytes, 0); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2204 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2205 | EXPORT_SYMBOL(blk_end_request); | 
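
/*
 * Illustrative sketch, not part of the original file: completing one
 * hardware chunk at a time.  blk_end_request() takes the queue lock
 * itself, so it suits completion paths that run without it; when the
 * lock is already held, __blk_end_request() below is the counterpart.
 * "struct my_chunk_dev" and "my_next_chunk" are hypothetical.
 */
struct my_chunk_dev {
	struct request *rq;		/* request being serviced */
};

static void my_next_chunk(struct my_chunk_dev *dev)
{
	/* stub: program the hardware for the next segment of dev->rq */
}

static void my_chunk_done(struct my_chunk_dev *dev, unsigned int done,
			  int error)
{
	if (blk_end_request(dev->rq, error, done))
		my_next_chunk(dev);	/* more data remains */
	else
		dev->rq = NULL;		/* request fully completed */
}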
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2206 |  | 
 | 2207 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2208 |  * blk_end_request_all - Helper function for drivers to finish the request. | 
 | 2209 |  * @rq: the request to finish | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2210 |  * @error: %0 for success, < %0 for error | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2211 |  * | 
 | 2212 |  * Description: | 
 | 2213 |  *     Completely finish @rq. | 
 | 2214 |  */ | 
 | 2215 | void blk_end_request_all(struct request *rq, int error) | 
 | 2216 | { | 
 | 2217 | 	bool pending; | 
 | 2218 | 	unsigned int bidi_bytes = 0; | 
 | 2219 |  | 
 | 2220 | 	if (unlikely(blk_bidi_rq(rq))) | 
 | 2221 | 		bidi_bytes = blk_rq_bytes(rq->next_rq); | 
 | 2222 |  | 
 | 2223 | 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 
 | 2224 | 	BUG_ON(pending); | 
 | 2225 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2226 | EXPORT_SYMBOL(blk_end_request_all); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2227 |  | 
 | 2228 | /** | 
 | 2229 |  * blk_end_request_cur - Helper function to finish the current request chunk. | 
 | 2230 |  * @rq: the request to finish the current chunk for | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2231 |  * @error: %0 for success, < %0 for error | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2232 |  * | 
 | 2233 |  * Description: | 
 | 2234 |  *     Complete the current consecutively mapped chunk from @rq. | 
 | 2235 |  * | 
 | 2236 |  * Return: | 
 | 2237 |  *     %false - we are done with this request | 
 | 2238 |  *     %true  - still buffers pending for this request | 
 | 2239 |  */ | 
 | 2240 | bool blk_end_request_cur(struct request *rq, int error) | 
 | 2241 | { | 
 | 2242 | 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 
 | 2243 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2244 | EXPORT_SYMBOL(blk_end_request_cur); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2245 |  | 
 | 2246 | /** | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2247 |  * blk_end_request_err - Finish a request till the next failure boundary. | 
 | 2248 |  * @rq: the request to finish till the next failure boundary for | 
 | 2249 |  * @error: must be negative errno | 
 | 2250 |  * | 
 | 2251 |  * Description: | 
 | 2252 |  *     Complete @rq till the next failure boundary. | 
 | 2253 |  * | 
 | 2254 |  * Return: | 
 | 2255 |  *     %false - we are done with this request | 
 | 2256 |  *     %true  - still buffers pending for this request | 
 | 2257 |  */ | 
 | 2258 | bool blk_end_request_err(struct request *rq, int error) | 
 | 2259 | { | 
 | 2260 | 	WARN_ON(error >= 0); | 
 | 2261 | 	return blk_end_request(rq, error, blk_rq_err_bytes(rq)); | 
 | 2262 | } | 
 | 2263 | EXPORT_SYMBOL_GPL(blk_end_request_err); | 
 | 2264 |  | 
 | 2265 | /** | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2266 |  * __blk_end_request - Helper function for drivers to complete the request. | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2267 |  * @rq:       the request being processed | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2268 |  * @error:    %0 for success, < %0 for error | 
 | 2269 |  * @nr_bytes: number of bytes to complete | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2270 |  * | 
 | 2271 |  * Description: | 
 | 2272 |  *     Must be called with the queue lock held, unlike blk_end_request(). | 
 | 2273 |  * | 
 | 2274 |  * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2275 |  *     %false - we are done with this request | 
 | 2276 |  *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2277 |  **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2278 | bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2279 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2280 | 	return __blk_end_bidi_request(rq, error, nr_bytes, 0); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2281 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2282 | EXPORT_SYMBOL(__blk_end_request); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2283 |  | 
 | 2284 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2285 |  * __blk_end_request_all - Helper function for drivers to finish the request. | 
 | 2286 |  * @rq: the request to finish | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2287 |  * @error: %0 for success, < %0 for error | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2288 |  * | 
 | 2289 |  * Description: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2290 |  *     Completely finish @rq.  Must be called with queue lock held. | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2291 |  */ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2292 | void __blk_end_request_all(struct request *rq, int error) | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2293 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2294 | 	bool pending; | 
 | 2295 | 	unsigned int bidi_bytes = 0; | 
 | 2296 |  | 
 | 2297 | 	if (unlikely(blk_bidi_rq(rq))) | 
 | 2298 | 		bidi_bytes = blk_rq_bytes(rq->next_rq); | 
 | 2299 |  | 
 | 2300 | 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 
 | 2301 | 	BUG_ON(pending); | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2302 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2303 | EXPORT_SYMBOL(__blk_end_request_all); | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2304 |  | 
 | 2305 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2306 |  * __blk_end_request_cur - Helper function to finish the current request chunk. | 
 | 2307 |  * @rq: the request to finish the current chunk for | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2308 |  * @error: %0 for success, < %0 for error | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2309 |  * | 
 | 2310 |  * Description: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2311 |  *     Complete the current consecutively mapped chunk from @rq.  Must | 
 | 2312 |  *     be called with queue lock held. | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2313 |  * | 
 | 2314 |  * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2315 |  *     %false - we are done with this request | 
 | 2316 |  *     %true  - still buffers pending for this request | 
 | 2317 |  */ | 
 | 2318 | bool __blk_end_request_cur(struct request *rq, int error) | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2319 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2320 | 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2321 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2322 | EXPORT_SYMBOL(__blk_end_request_cur); | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2323 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2324 | /** | 
 | 2325 |  * __blk_end_request_err - Finish a request till the next failure boundary. | 
 | 2326 |  * @rq: the request to finish till the next failure boundary for | 
 | 2327 |  * @error: must be negative errno | 
 | 2328 |  * | 
 | 2329 |  * Description: | 
 | 2330 |  *     Complete @rq till the next failure boundary.  Must be called | 
 | 2331 |  *     with queue lock held. | 
 | 2332 |  * | 
 | 2333 |  * Return: | 
 | 2334 |  *     %false - we are done with this request | 
 | 2335 |  *     %true  - still buffers pending for this request | 
 | 2336 |  */ | 
 | 2337 | bool __blk_end_request_err(struct request *rq, int error) | 
 | 2338 | { | 
 | 2339 | 	WARN_ON(error >= 0); | 
 | 2340 | 	return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); | 
 | 2341 | } | 
 | 2342 | EXPORT_SYMBOL_GPL(__blk_end_request_err); | 
 | 2343 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 2344 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 
 | 2345 | 		     struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2346 | { | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 2347 | 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ | 
 | 2348 | 	rq->cmd_flags |= bio->bi_rw & REQ_RW; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2349 |  | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 2350 | 	if (bio_has_data(bio)) { | 
 | 2351 | 		rq->nr_phys_segments = bio_phys_segments(q, bio); | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 2352 | 		rq->buffer = bio_data(bio); | 
 | 2353 | 	} | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2354 | 	rq->__data_len = bio->bi_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2355 | 	rq->bio = rq->biotail = bio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2356 |  | 
| NeilBrown | 6684657 | 2007-08-16 13:31:28 +0200 | [diff] [blame] | 2357 | 	if (bio->bi_bdev) | 
 | 2358 | 		rq->rq_disk = bio->bi_bdev->bd_disk; | 
 | 2359 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2360 |  | 
| Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 2361 | /** | 
 | 2362 |  * blk_lld_busy - Check if underlying low-level drivers of a device are busy | 
 | 2363 |  * @q : the queue of the device being checked | 
 | 2364 |  * | 
 | 2365 |  * Description: | 
 | 2366 |  *    Check if underlying low-level drivers of a device are busy. | 
 | 2367 |  *    If the drivers want to export their busy state, they must set their | 
 | 2368 |  *    own exporting function using blk_queue_lld_busy() first. | 
 | 2369 |  * | 
 | 2370 |  *    Basically, this function is used only by request stacking drivers | 
 | 2371 |  *    to stop dispatching requests to underlying devices when underlying | 
 | 2372 |  *    devices are busy.  This behavior allows more I/O merging on the queue | 
 | 2373 |  *    of the request stacking driver and prevents I/O throughput regression | 
 | 2374 |  *    on burst I/O load. | 
 | 2375 |  * | 
 | 2376 |  * Return: | 
 | 2377 |  *    0 - Not busy (The request stacking driver should dispatch request) | 
 | 2378 |  *    1 - Busy (The request stacking driver should stop dispatching request) | 
 | 2379 |  */ | 
 | 2380 | int blk_lld_busy(struct request_queue *q) | 
 | 2381 | { | 
 | 2382 | 	if (q->lld_busy_fn) | 
 | 2383 | 		return q->lld_busy_fn(q); | 
 | 2384 |  | 
 | 2385 | 	return 0; | 
 | 2386 | } | 
 | 2387 | EXPORT_SYMBOL_GPL(blk_lld_busy); | 
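
/*
 * Illustrative sketch, not part of the original file: a low-level
 * driver exporting its busy state and a stacking driver honouring it.
 * The "my_*" names and queuedata layout are hypothetical; the export
 * would be registered with blk_queue_lld_busy(q, my_lld_busy).
 */
struct my_lld {
	int inflight;
	int hw_queue_depth;
};

static int my_lld_busy(struct request_queue *q)
{
	struct my_lld *lld = q->queuedata;

	return lld->inflight >= lld->hw_queue_depth;
}

static int my_should_dispatch(struct request_queue *bottom_q)
{
	/* stop dispatching while busy so I/O can merge further up */
	return !blk_lld_busy(bottom_q);
}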
 | 2388 |  | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2389 | /** | 
 | 2390 |  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request | 
 | 2391 |  * @rq: the clone request to be cleaned up | 
 | 2392 |  * | 
 | 2393 |  * Description: | 
 | 2394 |  *     Free all bios in @rq for a cloned request. | 
 | 2395 |  */ | 
 | 2396 | void blk_rq_unprep_clone(struct request *rq) | 
 | 2397 | { | 
 | 2398 | 	struct bio *bio; | 
 | 2399 |  | 
 | 2400 | 	while ((bio = rq->bio) != NULL) { | 
 | 2401 | 		rq->bio = bio->bi_next; | 
 | 2402 |  | 
 | 2403 | 		bio_put(bio); | 
 | 2404 | 	} | 
 | 2405 | } | 
 | 2406 | EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); | 
 | 2407 |  | 
 | 2408 | /* | 
 | 2409 |  * Copy attributes of the original request to the clone request. | 
 | 2410 |  * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. | 
 | 2411 |  */ | 
 | 2412 | static void __blk_rq_prep_clone(struct request *dst, struct request *src) | 
 | 2413 | { | 
 | 2414 | 	dst->cpu = src->cpu; | 
 | 2415 | 	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); | 
 | 2416 | 	dst->cmd_type = src->cmd_type; | 
 | 2417 | 	dst->__sector = blk_rq_pos(src); | 
 | 2418 | 	dst->__data_len = blk_rq_bytes(src); | 
 | 2419 | 	dst->nr_phys_segments = src->nr_phys_segments; | 
 | 2420 | 	dst->ioprio = src->ioprio; | 
 | 2421 | 	dst->extra_len = src->extra_len; | 
 | 2422 | } | 
 | 2423 |  | 
 | 2424 | /** | 
 | 2425 |  * blk_rq_prep_clone - Helper function to setup clone request | 
 | 2426 |  * @rq: the request to be setup | 
 | 2427 |  * @rq_src: original request to be cloned | 
 | 2428 |  * @bs: bio_set that bios for clone are allocated from | 
 | 2429 |  * @gfp_mask: memory allocation mask for bio | 
 | 2430 |  * @bio_ctr: setup function to be called for each clone bio. | 
 | 2431 |  *           Returns %0 for success, non %0 for failure. | 
 | 2432 |  * @data: private data to be passed to @bio_ctr | 
 | 2433 |  * | 
 | 2434 |  * Description: | 
 | 2435 |  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. | 
 | 2436 |  *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) | 
 | 2437 |  *     are not copied, and copying such parts is the caller's responsibility. | 
 | 2438 |  *     Also, pages which the original bios are pointing to are not copied | 
 | 2439 |  *     and the cloned bios just point to the same pages. | 
 | 2440 |  *     So cloned bios must be completed before original bios, which means | 
 | 2441 |  *     the caller must complete @rq before @rq_src. | 
 | 2442 |  */ | 
 | 2443 | int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 
 | 2444 | 		      struct bio_set *bs, gfp_t gfp_mask, | 
 | 2445 | 		      int (*bio_ctr)(struct bio *, struct bio *, void *), | 
 | 2446 | 		      void *data) | 
 | 2447 | { | 
 | 2448 | 	struct bio *bio, *bio_src; | 
 | 2449 |  | 
 | 2450 | 	if (!bs) | 
 | 2451 | 		bs = fs_bio_set; | 
 | 2452 |  | 
 | 2453 | 	blk_rq_init(NULL, rq); | 
 | 2454 |  | 
 | 2455 | 	__rq_for_each_bio(bio_src, rq_src) { | 
 | 2456 | 		bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); | 
 | 2457 | 		if (!bio) | 
 | 2458 | 			goto free_and_out; | 
 | 2459 |  | 
 | 2460 | 		__bio_clone(bio, bio_src); | 
 | 2461 |  | 
 | 2462 | 		if (bio_integrity(bio_src) && | 
| Martin K. Petersen | 7878cba | 2009-06-26 15:37:49 +0200 | [diff] [blame] | 2463 | 		    bio_integrity_clone(bio, bio_src, gfp_mask, bs)) | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2464 | 			goto free_and_out; | 
 | 2465 |  | 
 | 2466 | 		if (bio_ctr && bio_ctr(bio, bio_src, data)) | 
 | 2467 | 			goto free_and_out; | 
 | 2468 |  | 
 | 2469 | 		if (rq->bio) { | 
 | 2470 | 			rq->biotail->bi_next = bio; | 
 | 2471 | 			rq->biotail = bio; | 
 | 2472 | 		} else | 
 | 2473 | 			rq->bio = rq->biotail = bio; | 
 | 2474 | 	} | 
 | 2475 |  | 
 | 2476 | 	__blk_rq_prep_clone(rq, rq_src); | 
 | 2477 |  | 
 | 2478 | 	return 0; | 
 | 2479 |  | 
 | 2480 | free_and_out: | 
 | 2481 | 	if (bio) | 
 | 2482 | 		bio_free(bio, bs); | 
 | 2483 | 	blk_rq_unprep_clone(rq); | 
 | 2484 |  | 
 | 2485 | 	return -ENOMEM; | 
 | 2486 | } | 
 | 2487 | EXPORT_SYMBOL_GPL(blk_rq_prep_clone); | 
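
/*
 * Illustrative sketch, not part of the original file: using the
 * @bio_ctr callback of blk_rq_prep_clone() to hook completion of each
 * cloned bio, roughly the pattern request-based dm follows.  A caller
 * would pass it as blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC,
 * my_bio_ctr, ctx); "my_clone_bio_end_io" and @ctx are hypothetical.
 */
static void my_clone_bio_end_io(struct bio *clone, int error)
{
	/* find the per-request context stashed in clone->bi_private */
}

static int my_bio_ctr(struct bio *clone, struct bio *bio_src, void *data)
{
	clone->bi_private = data;	/* @data given to blk_rq_prep_clone() */
	clone->bi_end_io = my_clone_bio_end_io;
	return 0;			/* non-zero aborts the cloning */
}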
 | 2488 |  | 
| Jens Axboe | 18887ad | 2008-07-28 13:08:45 +0200 | [diff] [blame] | 2489 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2490 | { | 
 | 2491 | 	return queue_work(kblockd_workqueue, work); | 
 | 2492 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | EXPORT_SYMBOL(kblockd_schedule_work); | 
 | 2494 |  | 
| Jens Axboe | 8e29675 | 2009-10-03 16:26:03 +0200 | [diff] [blame] | 2495 | int kblockd_schedule_delayed_work(struct request_queue *q, | 
 | 2496 | 				  struct delayed_work *work, | 
 | 2497 | 				  unsigned long delay) | 
 | 2498 | { | 
 | 2499 | 	return queue_delayed_work(kblockd_workqueue, work, delay); | 
 | 2500 | } | 
 | 2501 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | 
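
/*
 * Illustrative sketch, not part of the original file: deferring work
 * to the kblockd workqueue, the usual home for delayed block-layer
 * processing such as unplugging.  "my_work_fn" and "my_kick" are
 * hypothetical.
 */
static void my_work_fn(struct work_struct *work)
{
	/* runs later in process context on kblockd */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_kick(struct request_queue *q)
{
	kblockd_schedule_work(q, &my_work);
}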
 | 2502 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2503 | int __init blk_dev_init(void) | 
 | 2504 | { | 
| Nikanth Karthikesan | 9eb55b0 | 2009-04-27 14:53:54 +0200 | [diff] [blame] | 2505 | 	BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 
 | 2506 | 			sizeof(((struct request *)0)->cmd_flags)); | 
 | 2507 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2508 | 	kblockd_workqueue = create_workqueue("kblockd"); | 
 | 2509 | 	if (!kblockd_workqueue) | 
 | 2510 | 		panic("Failed to create kblockd\n"); | 
 | 2511 |  | 
 | 2512 | 	request_cachep = kmem_cache_create("blkdev_requests", | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2513 | 			sizeof(struct request), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2514 |  | 
| Jens Axboe | 8324aa9 | 2008-01-29 14:51:59 +0100 | [diff] [blame] | 2515 | 	blk_requestq_cachep = kmem_cache_create("blkdev_queue", | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 2516 | 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2517 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2518 | 	return 0; | 
 | 2519 | } | 
 | 2520 |  |