/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

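/*
 * Parse "fail_io_timeout=" from the kernel command line. The string is
 * handed to setup_fault_attr(), which expects the generic fault-attr
 * format <interval>,<probability>,<space>,<times>, so the attribute can
 * be configured before debugfs is available.
 */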
static int __init setup_fail_io_timeout(char *str)
{
        return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

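/*
 * blk_should_fake_timeout - check if a request should get a fake timeout
 * @q:	queue the request was issued on
 *
 * Returns non-zero when fault injection is enabled for the queue and the
 * fault attribute elects this request; callers use a non-zero return to
 * drop the completion so that the timeout machinery fires instead.
 */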
int blk_should_fake_timeout(struct request_queue *q)
{
        if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
                return 0;

        return should_fail(&fail_io_timeout, 1);
}

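/*
 * Expose the fault attribute under debugfs (fail_io_timeout/ below the
 * debugfs root, typically /sys/kernel/debug) so its knobs can be tuned
 * at runtime.
 */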
static int __init fail_io_timeout_debugfs(void)
{
        return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
}

late_initcall(fail_io_timeout_debugfs);

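/*
 * part_timeout_show - sysfs read handler, reports "1" if timeout fault
 * injection is enabled on the disk's queue and "0" otherwise.
 */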
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

        return sprintf(buf, "%d\n", set != 0);
}

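/*
 * part_timeout_store - sysfs write handler, toggles QUEUE_FLAG_FAIL_IO on
 * the disk's queue: any non-zero value enables timeout fault injection,
 * zero disables it. The attribute itself is registered outside this file
 * (in mainline, genhd.c wires it up as a per-disk "io-timeout-fail" file).
 */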
ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        int val;

        if (count) {
                struct request_queue *q = disk->queue;
                char *p = (char *) buf;

                val = simple_strtoul(p, &p, 10);
                spin_lock_irq(q->queue_lock);
                if (val)
                        queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
                else
                        queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
                spin_unlock_irq(q->queue_lock);
        }

        return count;
}

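/*
 * Example of driving this at runtime, assuming debugfs is mounted at
 * /sys/kernel/debug and the store handler above is exposed as the
 * per-disk "io-timeout-fail" attribute (an assumption about the sysfs
 * name; it is not defined in this file):
 *
 *	echo 1 > /sys/block/sda/io-timeout-fail
 *	echo 100 > /sys/kernel/debug/fail_io_timeout/probability
 *	echo -1 > /sys/kernel/debug/fail_io_timeout/times
 */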
#endif /* CONFIG_FAIL_IO_TIMEOUT */

/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling the timer for
 */
void blk_delete_timer(struct request *req)
{
        list_del_init(&req->timeout_list);
}

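/*
 * blk_rq_timed_out - run the driver's timeout handler for an expired request
 * @req:	request whose deadline has passed
 *
 * Dispatches on the handler's verdict: BLK_EH_HANDLED means the driver
 * resolved the command, so the request is completed here;
 * BLK_EH_RESET_TIMER re-arms the timer and waits again;
 * BLK_EH_NOT_HANDLED leaves recovery entirely to the low-level driver.
 */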
static void blk_rq_timed_out(struct request *req)
{
        struct request_queue *q = req->q;
        enum blk_eh_timer_return ret;

        ret = q->rq_timed_out_fn(req);
        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                blk_clear_rq_complete(req);
                blk_add_timer(req);
                break;
        case BLK_EH_NOT_HANDLED:
                /*
                 * The LLD handles this for now, but in the future we
                 * could send a message to abort the command and move
                 * more of the generic SCSI EH code into the block layer.
                 */
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}

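/*
 * blk_rq_timed_out_timer - per-queue timeout timer callback
 * @data:	the request_queue, cast to unsigned long
 *
 * Walks the queue's timeout list, handing every request whose deadline
 * has expired to blk_rq_timed_out(), then re-arms the timer for the
 * earliest deadline still pending.
 */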
void blk_rq_timed_out_timer(unsigned long data)
{
        struct request_queue *q = (struct request_queue *) data;
        unsigned long flags, next = 0;
        struct request *rq, *tmp;

        spin_lock_irqsave(q->queue_lock, flags);

        list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
                if (time_after_eq(jiffies, rq->deadline)) {
                        list_del_init(&rq->timeout_list);

                        /*
                         * Check if we raced with end io completion
                         */
                        if (blk_mark_rq_complete(rq))
                                continue;
                        blk_rq_timed_out(rq);
                } else if (!next || time_after(next, rq->deadline))
                        next = rq->deadline;
        }

        /*
         * next can never be 0 here with the list non-empty, since we always
         * bump ->deadline to 1 so we can detect if the timer was ever added
         * or not. See comment in blk_add_timer()
         */
        if (next)
                mod_timer(&q->timeout, round_jiffies_up(next));

        spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request -- Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the queue's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold the queue lock.
 */
void blk_abort_request(struct request *req)
{
        if (blk_mark_rq_complete(req))
                return;
        blk_delete_timer(req);
        blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
        struct request_queue *q = req->q;
        unsigned long expiry;

        if (!q->rq_timed_out_fn)
                return;

        BUG_ON(!list_empty(&req->timeout_list));
        BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

        /*
         * Some LLDs, like scsi, peek at the timeout to prevent a
         * command from being retried forever.
         */
        if (!req->timeout)
                req->timeout = q->rq_timeout;

        req->deadline = jiffies + req->timeout;
        list_add_tail(&req->timeout_list, &q->timeout_list);

        /*
         * If the timer isn't already pending or this timeout is earlier
         * than an existing one, modify the timer. Round up to the
         * nearest second.
         */
        expiry = round_jiffies_up(req->deadline);

        if (!timer_pending(&q->timeout) ||
            time_before(expiry, q->timeout.expires))
                mod_timer(&q->timeout, expiry);
}

/**
 * blk_abort_queue -- Abort all requests on the given queue
 * @q:	pointer to the queue
 */
void blk_abort_queue(struct request_queue *q)
{
        unsigned long flags;
        struct request *rq, *tmp;
        LIST_HEAD(list);

        /*
         * Not a request based block device, nothing to abort
         */
        if (!q->request_fn)
                return;

        spin_lock_irqsave(q->queue_lock, flags);

        elv_abort_queue(q);

        /*
         * Splice entries to a local list, to avoid deadlocking if entries
         * get re-added to the timeout list by error handling
         */
        list_splice_init(&q->timeout_list, &list);

        list_for_each_entry_safe(rq, tmp, &list, timeout_list)
                blk_abort_request(rq);

        /*
         * Occasionally, blk_abort_request() will return without
         * deleting the element from the list. Make sure we add those back
         * instead of leaving them on the local stack list.
         */
        list_splice(&list, &q->timeout_list);

        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_abort_queue);