/*
 *  Anticipatory & deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 *                     Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes; these limits are not hard: they may be missed
 * even if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning logic;
 * the problem is that we can send a lot of writes to the disk cache / TCQ
 * in a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)

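/*
 * Illustrative only (not used by the code): how the HZ-relative defaults
 * above translate to wall-clock time, assuming HZ == 1000:
 *
 *	default_read_expire	HZ/8   == 125 jiffies == 125ms
 *	default_write_expire	HZ/4   == 250 jiffies == 250ms
 *	default_antic_expire	HZ/150 == 6 jiffies   == ~6ms
 *
 * With HZ == 100, HZ/150 evaluates to 0, which is why the ?: above clamps
 * the anticipation window to a minimum of 1 jiffy (10ms in that case).
 */
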
/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation, waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)

/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation)	*/
	ANTIC_WAIT_REQ,		/* The last read has not yet completed  */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};

struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct request *next_rq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total;	/* thinktime total on new procs */
	unsigned long new_ttime_mean;	/* mean thinktime on new procs */
	u64 new_seek_total;		/* seek total on new procs */
	sector_t new_seek_mean;		/* mean seek distance on new procs */

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished; /* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};

#define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
#define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
#define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)

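/*
 * Usage sketch (illustrative): the macros above stash the AS per-request
 * state in the request's two elevator_private pointers, e.g.
 *
 *	RQ_SET_STATE(rq, AS_RQ_QUEUED);
 *	if (RQ_STATE(rq) == AS_RQ_QUEUED)
 *		ioc = RQ_IOC(rq);
 *
 * (ioc being a hypothetical local variable.)
 */
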
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	elv_ioc_count_dec(ioc_count);
	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}

static void as_trim(struct io_context *ioc)
{
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
		elv_ioc_count_inc(ioc_count);
	}

	return ret;
}

/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(int node)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}

static void as_put_io_context(struct request *rq)
{
	struct as_io_context *aic;

	if (unlikely(!RQ_IOC(rq)))
		return;

	aic = RQ_IOC(rq)->aic;

	if (rq_is_sync(rq) && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(RQ_IOC(rq));
}

/*
 * rb tree support functions
 */
#define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])

static void as_add_rq_rb(struct as_data *ad, struct request *rq)
{
	struct request *alias;

	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}

static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
{
	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
}

/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request.
				 */

#define BACK_PENALTY	2

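/*
 * Example with illustrative numbers: with the head at sector 10000, a
 * request at 10512 has d = 512, while one 256 sectors behind the head
 * (at 9744, well within MAXBACK) has d = 256 * BACK_PENALTY = 512, a
 * dead heat.  A backward request therefore wins in as_choose_req()
 * below only if it is less than half as far away as the forward one.
 */
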
/*
 * as_choose_req selects the preferred one of two requests of the same data_dir,
 * ignoring time - e.g. timeouts, which are the job of as_dispatch_request.
 */
static struct request *
as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	data_dir = rq_is_sync(rq1);

	last = ad->last_sector[data_dir];
	s1 = rq1->sector;
	s2 = rq2->sector;

	BUG_ON(data_dir != rq_is_sync(rq2));

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return rq1;
	else if (!r2_wrap && r1_wrap)
		return rq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return rq1;
	else if (d2 < d1)
		return rq2;
	else {
		if (s1 >= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * as_find_next_rq finds the next request after @last in elevator order.
 * This and as_choose_req form the basis for how the scheduler chooses
 * what request to process next. Anticipation works on top of this.
 */
static struct request *
as_find_next_rq(struct as_data *ad, struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		const int data_dir = rq_is_sync(last);

		rbnext = rb_first(&ad->sort_list[data_dir]);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return as_choose_req(ad, next, prev);
}

/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}

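/*
 * Worked example of the wrap handling above (illustrative): the modular
 * subtraction copes with an intervening jiffy wrap by itself, e.g.
 * antic_start == ULONG_MAX - 1 with jiffies == 4 still yields
 * delta_jif == 6.  The sign fixup covers the pathological case where
 * more than LONG_MAX jiffies have elapsed and the signed view of the
 * difference goes negative.
 */
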
/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}

/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are, hopefully, timing from when the candidate process wakes up.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}

/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}

/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}

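/*
 * Illustrative trace of the 8.8 fixed-point decay above (1.0 == 256):
 * starting from ttime_samples == 0, each new sample gives
 *
 *	(7*0  + 256)/8 == 32
 *	(7*32 + 256)/8 == 60
 *	(7*60 + 256)/8 == 84
 *	...
 *
 * converging towards 256, i.e. the weight of one full sample, with old
 * history decaying by 7/8 per step.  The +128 in the ttime_mean
 * calculation rounds to nearest instead of truncating.
 */
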
static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second & third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}

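/*
 * Units, by example (illustrative): sdist is in 512-byte sectors, so the
 * early clamp of seek_mean*4 + 2*1024*1024 admits roughly a 1GB outlier
 * while history is thin, dropping to about 64MB (2*1024*64 sectors) once
 * seek_samples has grown past 60.
 */
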
/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	int data_dir = rq_is_sync(rq);
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}

/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
			struct request *rq)
{
	unsigned long delay;	/* milliseconds */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = rq->sector;
	sector_t delta; /* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = ((jiffies - ad->antic_start) * 1000) / HZ;

	if (delay == 0)
		delta = 8192;
	else if (delay <= 20 && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}

	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}

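/*
 * Example of the window growth above (illustrative): with no delay the
 * "close" window is 8192 sectors (4MB); after anticipating for 3ms it is
 * 8192 << 3 == 64K sectors (32MB).  Past 20ms, or past the antic_expire
 * threshold, any request counts as close, since we were about to time
 * out anyway.
 */
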
/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that the application will not be
 * submitting any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);

	if (rq && ioc == RQ_IOC(rq)) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. It is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, rq);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128)
			return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	return 0;
}

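/*
 * The probability check above, by example (1.0 == 256 in fixed point):
 * exit_prob * exit_no_coop > 128*256 fires once the estimated chance
 * that the task has exited and will not be rescued by a cooperating
 * request exceeds 50%, e.g. exit_prob == 192 (75%) and exit_no_coop
 * == 192 gives 36864 > 32768.
 */
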
/*
 * as_can_anticipate indicates whether we should either run rq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct request *rq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, rq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}

/*
 * as_update_rq must be called whenever a request (rq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_rq(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	/* keep the next_rq cache up to date */
	ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, rq))
			as_antic_stop(ad);
	}
}

/*
 * Gathers timings and resizes the write batch automatically
 */
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}

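/*
 * Tuning example (illustrative, assuming batch_expire[REQ_ASYNC] == HZ/8,
 * i.e. 125ms at HZ == 1000): a write batch that actually took 400ms
 * (more than 3 * batch) halves write_batch_count, one that took 150ms
 * just decrements it, and a batch that burned through its request quota
 * in well under the target grows the count again (doubling it if the
 * batch finished in under a third of the target).  The quota thus
 * converges on however many writes fit in one batch_expire interval.
 */
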
/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(!list_empty(&rq->queuelist));

	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
		printk("rq->state %d\n", RQ_STATE(rq));
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(rq);
out:
	RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
}

/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (i.e. the dispatch queue)
 */
static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	ioc = RQ_IOC(rq);
	if (ioc && ioc->aic) {
		BUG_ON(!atomic_read(&ioc->aic->nr_queued));
		atomic_dec(&ioc->aic->nr_queued);
	}

	/*
	 * Update the "next_rq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_rq[data_dir] == rq)
		ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	rq_fifo_clear(rq);
	as_del_rq_rb(ad, rq);
}

/*
 * as_fifo_expired returns 0 if there are no expired reads on the fifo,
 * 1 otherwise.  It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval.  Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct request *rq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	rq = rq_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, rq_fifo_time(rq));
}

/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}

/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_rq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		struct io_context *ioc = RQ_IOC(rq);
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &ioc);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	as_remove_queued_request(ad->q, rq);
	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
	ad->nr_dispatched++;
}

/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(request_queue_t *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
	struct request *rq;

	if (unlikely(force)) {
		/*
		 * Forced dispatch; accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing up write batch accounting, which occurs
		 * on the W->R transition.
		 */
 | 996 | 		int dispatched = 0; | 
 | 997 |  | 
 | 998 | 		ad->batch_data_dir = REQ_SYNC; | 
 | 999 | 		ad->changed_batch = 0; | 
 | 1000 | 		ad->new_batch = 0; | 
 | 1001 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1002 | 		while (ad->next_rq[REQ_SYNC]) { | 
 | 1003 | 			as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]); | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1004 | 			dispatched++; | 
 | 1005 | 		} | 
 | 1006 | 		ad->last_check_fifo[REQ_SYNC] = jiffies; | 
 | 1007 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1008 | 		while (ad->next_rq[REQ_ASYNC]) { | 
 | 1009 | 			as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]); | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1010 | 			dispatched++; | 
 | 1011 | 		} | 
 | 1012 | 		ad->last_check_fifo[REQ_ASYNC] = jiffies; | 
 | 1013 |  | 
 | 1014 | 		return dispatched; | 
 | 1015 | 	} | 
 | 1016 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | 	/* Signal that the write batch was uncontended, so we can't time it */ | 
 | 1018 | 	if (ad->batch_data_dir == REQ_ASYNC && !reads) { | 
 | 1019 | 		if (ad->current_write_count == 0 || !writes) | 
 | 1020 | 			ad->write_batch_idled = 1; | 
 | 1021 | 	} | 
 | 1022 |  | 
 | 1023 | 	if (!(reads || writes) | 
 | 1024 | 		|| ad->antic_status == ANTIC_WAIT_REQ | 
 | 1025 | 		|| ad->antic_status == ANTIC_WAIT_NEXT | 
 | 1026 | 		|| ad->changed_batch) | 
 | 1027 | 		return 0; | 
 | 1028 |  | 
| Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1029 | 	if (!(reads && writes && as_batch_expired(ad))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | 		/* | 
 | 1031 | 		 * batch is still running or no reads or no writes | 
 | 1032 | 		 */ | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1033 | 		rq = ad->next_rq[ad->batch_data_dir]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 |  | 
 | 1035 | 		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { | 
 | 1036 | 			if (as_fifo_expired(ad, REQ_SYNC)) | 
 | 1037 | 				goto fifo_expired; | 
 | 1038 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1039 | 			if (as_can_anticipate(ad, rq)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1040 | 				as_antic_waitreq(ad); | 
 | 1041 | 				return 0; | 
 | 1042 | 			} | 
 | 1043 | 		} | 
 | 1044 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1045 | 		if (rq) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | 			/* we have a "next request" */ | 
 | 1047 | 			if (reads && !writes) | 
 | 1048 | 				ad->current_batch_expires = | 
 | 1049 | 					jiffies + ad->batch_expire[REQ_SYNC]; | 
 | 1050 | 			goto dispatch_request; | 
 | 1051 | 		} | 
 | 1052 | 	} | 
 | 1053 |  | 
 | 1054 | 	/* | 
 | 1055 | 	 * at this point we are not running a batch. select the appropriate | 
 | 1056 | 	 * data direction (read / write) | 
 | 1057 | 	 */ | 
 | 1058 |  | 
 | 1059 | 	if (reads) { | 
| Jens Axboe | dd67d05 | 2006-06-21 09:36:18 +0200 | [diff] [blame] | 1060 | 		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 |  | 
 | 1062 | 		if (writes && ad->batch_data_dir == REQ_SYNC) | 
 | 1063 | 			/* | 
 | 1064 | 			 * Last batch was a read, switch to writes | 
 | 1065 | 			 */ | 
 | 1066 | 			goto dispatch_writes; | 
 | 1067 |  | 
 | 1068 | 		if (ad->batch_data_dir == REQ_ASYNC) { | 
 | 1069 | 			WARN_ON(ad->new_batch); | 
 | 1070 | 			ad->changed_batch = 1; | 
 | 1071 | 		} | 
 | 1072 | 		ad->batch_data_dir = REQ_SYNC; | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1073 | 		rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | 		ad->last_check_fifo[ad->batch_data_dir] = jiffies; | 
 | 1075 | 		goto dispatch_request; | 
 | 1076 | 	} | 
 | 1077 |  | 
 | 1078 | 	/* | 
 | 1079 | 	 * no reads are pending - dispatch writes | 
 | 1080 | 	 */ | 
 | 1081 |  | 
 | 1082 | 	if (writes) { | 
 | 1083 | dispatch_writes: | 
| Jens Axboe | dd67d05 | 2006-06-21 09:36:18 +0200 | [diff] [blame] | 1084 | 		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1085 |  | 
 | 1086 | 		if (ad->batch_data_dir == REQ_SYNC) { | 
 | 1087 | 			ad->changed_batch = 1; | 
 | 1088 |  | 
 | 1089 | 			/* | 
 | 1090 | 			 * new_batch might be 1 when the queue runs out of | 
 | 1091 | 			 * reads. A subsequent submission of a write might | 
 | 1092 | 			 * cause a change of batch before the read is finished. | 
 | 1093 | 			 */ | 
 | 1094 | 			ad->new_batch = 0; | 
 | 1095 | 		} | 
 | 1096 | 		ad->batch_data_dir = REQ_ASYNC; | 
 | 1097 | 		ad->current_write_count = ad->write_batch_count; | 
 | 1098 | 		ad->write_batch_idled = 0; | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1099 | 		rq = ad->next_rq[ad->batch_data_dir]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 | 		goto dispatch_request; | 
 | 1101 | 	} | 
 | 1102 |  | 
 | 1103 | 	BUG(); | 
 | 1104 | 	return 0; | 
 | 1105 |  | 
 | 1106 | dispatch_request: | 
 | 1107 | 	/* | 
 | 1108 | 	 * If a request has expired, service it. | 
 | 1109 | 	 */ | 
 | 1110 |  | 
 | 1111 | 	if (as_fifo_expired(ad, ad->batch_data_dir)) { | 
 | 1112 | fifo_expired: | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1113 | 		rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 | 	} | 
 | 1115 |  | 
 | 1116 | 	if (ad->changed_batch) { | 
 | 1117 | 		WARN_ON(ad->new_batch); | 
 | 1118 |  | 
 | 1119 | 		if (ad->nr_dispatched) | 
 | 1120 | 			return 0; | 
 | 1121 |  | 
 | 1122 | 		if (ad->batch_data_dir == REQ_ASYNC) | 
 | 1123 | 			ad->current_batch_expires = jiffies + | 
 | 1124 | 					ad->batch_expire[REQ_ASYNC]; | 
 | 1125 | 		else | 
 | 1126 | 			ad->new_batch = 1; | 
 | 1127 |  | 
 | 1128 | 		ad->changed_batch = 0; | 
 | 1129 | 	} | 
 | 1130 |  | 
 | 1131 | 	/* | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1132 | 	 * rq is the request selected above. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | 	 */ | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1134 | 	as_move_to_dispatch(ad, rq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 |  | 
 | 1136 | 	return 1; | 
 | 1137 | } | 
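/*
 * Editor's note: a worked timeline for the changed_batch/new_batch handling
 * above (all values hypothetical, HZ assumed 1000). Suppose a write batch is
 * running when reads arrive:
 *
 *	t=10000  reads queued        -> batch_data_dir = REQ_SYNC,
 *	                                changed_batch = 1
 *	t=10001  3 writes in flight  -> nr_dispatched != 0, dispatch returns 0
 *	                                until the old direction drains
 *	t=10005  nr_dispatched == 0  -> new_batch = 1, changed_batch = 0, the
 *	                                first read is dispatched; the read
 *	                                batch timer only starts once that read
 *	                                completes, so the initial seek is not
 *	                                charged against the batch
 */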
 | 1138 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | /* | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1140 |  * add rq to rbtree and fifo | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 |  */ | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1142 | static void as_add_request(request_queue_t *q, struct request *rq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1143 | { | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1144 | 	struct as_data *ad = q->elevator->elevator_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | 	int data_dir; | 
 | 1146 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1147 | 	RQ_SET_STATE(rq, AS_RQ_NEW); | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1148 |  | 
| Jens Axboe | 9e2585a | 2006-07-28 09:26:13 +0200 | [diff] [blame] | 1149 | 	data_dir = rq_is_sync(rq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 |  | 
| Jens Axboe | b5deef9 | 2006-07-19 23:39:40 +0200 | [diff] [blame] | 1151 | 	rq->elevator_private = as_get_io_context(q->node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1153 | 	if (RQ_IOC(rq)) { | 
 | 1154 | 		as_update_iohist(ad, RQ_IOC(rq)->aic, rq); | 
 | 1155 | 		atomic_inc(&RQ_IOC(rq)->aic->nr_queued); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 | 	} | 
 | 1157 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1158 | 	as_add_rq_rb(ad, rq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 |  | 
| Tejun Heo | ef9be1d | 2005-11-11 14:27:09 +0100 | [diff] [blame] | 1160 | 	/* | 
 | 1161 | 	 * set the request's expire time and add it to the fifo list | 
 | 1162 | 	 */ | 
| Jens Axboe | d4f2f46 | 2006-07-13 09:12:14 +0200 | [diff] [blame] | 1163 | 	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]); | 
 | 1164 | 	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1166 | 	as_update_rq(ad, rq); /* keep state machine up to date */ | 
 | 1167 | 	RQ_SET_STATE(rq, AS_RQ_QUEUED); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1168 | } | 
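/*
 * Editor's note: a worked example of the fifo deadline set above, assuming
 * HZ=1000 and a hypothetical fifo_expire[REQ_SYNC] of 125 jiffies. A sync
 * request queued at jiffies=10000 gets a fifo time of 10125; once jiffies
 * passes 10125, as_fifo_expired() reports it and the dispatch path services
 * it from the fifo rather than from the seek-sorted rbtree.
 */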
 | 1169 |  | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1170 | static void as_activate_request(request_queue_t *q, struct request *rq) | 
 | 1171 | { | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1172 | 	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED); | 
 | 1173 | 	RQ_SET_STATE(rq, AS_RQ_REMOVED); | 
 | 1174 | 	if (RQ_IOC(rq) && RQ_IOC(rq)->aic) | 
 | 1175 | 		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched); | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1176 | } | 
 | 1177 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | static void as_deactivate_request(request_queue_t *q, struct request *rq) | 
 | 1179 | { | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1180 | 	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED); | 
 | 1181 | 	RQ_SET_STATE(rq, AS_RQ_DISPATCHED); | 
 | 1182 | 	if (RQ_IOC(rq) && RQ_IOC(rq)->aic) | 
 | 1183 | 		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | } | 
 | 1185 |  | 
 | 1186 | /* | 
 | 1187 |  * as_queue_empty tells us whether any requests are left in the scheduler. A | 
 | 1188 |  * driver may still be unable to get the next request even when the queue is | 
 | 1189 |  * not empty - the block layer uses this to check for plugging and merging | 
 | 1190 |  * opportunities. | 
 | 1191 |  */ | 
 | 1192 | static int as_queue_empty(request_queue_t *q) | 
 | 1193 | { | 
 | 1194 | 	struct as_data *ad = q->elevator->elevator_data; | 
 | 1195 |  | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1196 | 	return list_empty(&ad->fifo_list[REQ_ASYNC]) | 
 | 1197 | 		&& list_empty(&ad->fifo_list[REQ_SYNC]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | } | 
 | 1199 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | static int | 
 | 1201 | as_merge(request_queue_t *q, struct request **req, struct bio *bio) | 
 | 1202 | { | 
 | 1203 | 	struct as_data *ad = q->elevator->elevator_data; | 
 | 1204 | 	sector_t rb_key = bio->bi_sector + bio_sectors(bio); | 
 | 1205 | 	struct request *__rq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 |  | 
 | 1207 | 	/* | 
 | 1208 | 	 * check for front merge | 
 | 1209 | 	 */ | 
| Jens Axboe | e37f346 | 2006-07-18 21:06:01 +0200 | [diff] [blame] | 1210 | 	__rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key); | 
| Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 1211 | 	if (__rq && elv_rq_merge_ok(__rq, bio)) { | 
 | 1212 | 		*req = __rq; | 
 | 1213 | 		return ELEVATOR_FRONT_MERGE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | 	} | 
 | 1215 |  | 
 | 1216 | 	return ELEVATOR_NO_MERGE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | } | 
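/*
 * Editor's note: the sort_list rbtree is keyed by request start sector, so
 * rb_key above is this bio's end sector and elv_rb_find() returns a queued
 * request that begins exactly where the bio ends. Worked example (sector
 * numbers hypothetical): an 8-sector bio at sector 96 gives rb_key = 104;
 * if a request starting at sector 104 is queued and elv_rq_merge_ok()
 * agrees, the bio is glued onto its front as ELEVATOR_FRONT_MERGE.
 */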
 | 1218 |  | 
| Jens Axboe | e37f346 | 2006-07-18 21:06:01 +0200 | [diff] [blame] | 1219 | static void as_merged_request(request_queue_t *q, struct request *req, int type) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | { | 
 | 1221 | 	struct as_data *ad = q->elevator->elevator_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 |  | 
 | 1223 | 	/* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | 	 * if the merge was a front merge, we need to reposition request | 
 | 1225 | 	 */ | 
| Jens Axboe | e37f346 | 2006-07-18 21:06:01 +0200 | [diff] [blame] | 1226 | 	if (type == ELEVATOR_FRONT_MERGE) { | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1227 | 		as_del_rq_rb(ad, req); | 
 | 1228 | 		as_add_rq_rb(ad, req); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | 		/* | 
 | 1230 | 		 * Note! At this stage of this and the next function, our next | 
 | 1231 | 		 * request may not be optimal - eg the request may have "grown" | 
 | 1232 | 		 * behind the disk head. We currently don't bother adjusting. | 
 | 1233 | 		 */ | 
 | 1234 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | } | 
 | 1236 |  | 
| Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1237 | static void as_merged_requests(request_queue_t *q, struct request *req, | 
 | 1238 | 			 	struct request *next) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1239 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1240 | 	/* | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1241 | 	 * if next expires before rq, assign its expire time to rq | 
 | 1242 | 	 * and move into next position (next will be deleted) in fifo | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1243 | 	 */ | 
| Jens Axboe | d4f2f46 | 2006-07-13 09:12:14 +0200 | [diff] [blame] | 1244 | 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { | 
 | 1245 | 		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1246 | 			struct io_context *rioc = RQ_IOC(req); | 
 | 1247 | 			struct io_context *nioc = RQ_IOC(next); | 
 | 1248 |  | 
| Jens Axboe | d4f2f46 | 2006-07-13 09:12:14 +0200 | [diff] [blame] | 1249 | 			list_move(&req->queuelist, &next->queuelist); | 
 | 1250 | 			rq_set_fifo_time(req, rq_fifo_time(next)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | 			/* | 
 | 1252 | 			 * Don't copy here but swap, because when next is | 
 | 1253 | 			 * removed below, it must contain the unused context | 
 | 1254 | 			 */ | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1255 | 			swap_io_context(&rioc, &nioc); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | 		} | 
 | 1257 | 	} | 
 | 1258 |  | 
 | 1259 | 	/* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | 	 * kill knowledge of next, this one is a goner | 
 | 1261 | 	 */ | 
 | 1262 | 	as_remove_queued_request(q, next); | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1263 | 	as_put_io_context(next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 |  | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1265 | 	RQ_SET_STATE(next, AS_RQ_MERGED); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 | } | 
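/*
 * Editor's note: a worked example of the fifo swap above (times
 * hypothetical). If req expires at 10200 and next at 10150, next was queued
 * first; since req absorbs next's sectors in the merge, it also inherits
 * next's fifo position and the earlier 10150 deadline. Merging may reorder
 * requests, but it must never push a request's fifo service later.
 */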
 | 1267 |  | 
 | 1268 | /* | 
 | 1269 |  * This is executed in a "deferred" process context, by kblockd. It calls the | 
 | 1270 |  * driver's request_fn so the driver can submit that request. | 
 | 1271 |  * | 
 | 1272 |  * IMPORTANT! This guy will reenter the elevator, so set up all queue global | 
 | 1273 |  * state before calling, and don't rely on any state over calls. | 
 | 1274 |  * | 
 | 1275 |  * FIXME! dispatch queue is not a queue at all! | 
 | 1276 |  */ | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 1277 | static void as_work_handler(struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | { | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 1279 | 	struct as_data *ad = container_of(work, struct as_data, antic_work); | 
 | 1280 | 	struct request_queue *q = ad->q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | 	unsigned long flags; | 
 | 1282 |  | 
 | 1283 | 	spin_lock_irqsave(q->queue_lock, flags); | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1284 | 	blk_start_queueing(q); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | 	spin_unlock_irqrestore(q->queue_lock, flags); | 
 | 1286 | } | 
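/*
 * Editor's note: a minimal sketch of how this handler is armed from the
 * anticipation paths (call site shown for illustration only):
 *
 *	kblockd_schedule_work(&ad->antic_work);
 *
 * kblockd then runs as_work_handler() in process context; queue_lock is
 * also taken from request-completion (interrupt) context, hence the
 * irqsave/irqrestore pair above rather than a plain spin_lock().
 */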
 | 1287 |  | 
| Jens Axboe | cb78b28 | 2006-07-28 09:32:57 +0200 | [diff] [blame] | 1288 | static int as_may_queue(request_queue_t *q, int rw) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | { | 
 | 1290 | 	int ret = ELV_MQUEUE_MAY; | 
 | 1291 | 	struct as_data *ad = q->elevator->elevator_data; | 
 | 1292 | 	struct io_context *ioc; | 
 |  |  | 
 | 1293 | 	if (ad->antic_status == ANTIC_WAIT_REQ || | 
 | 1294 | 			ad->antic_status == ANTIC_WAIT_NEXT) { | 
| Jens Axboe | b5deef9 | 2006-07-19 23:39:40 +0200 | [diff] [blame] | 1295 | 		ioc = as_get_io_context(q->node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | 		if (ad->io_context == ioc) | 
 | 1297 | 			ret = ELV_MQUEUE_MUST; | 
 | 1298 | 		put_io_context(ioc); | 
 | 1299 | 	} | 
 | 1300 |  | 
 | 1301 | 	return ret; | 
 | 1302 | } | 
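/*
 * Editor's note: the MUST above keeps anticipation from deadlocking. While
 * the queue idles it is waiting for ad->io_context to submit the next
 * request; if that task could be blocked by the normal request-allocation
 * limits, the scheduler would be waiting on a request that might never be
 * allowed in, so its allocation is force-permitted.
 */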
 | 1303 |  | 
 | 1304 | static void as_exit_queue(elevator_t *e) | 
 | 1305 | { | 
 | 1306 | 	struct as_data *ad = e->elevator_data; | 
 | 1307 |  | 
 | 1308 | 	del_timer_sync(&ad->antic_timer); | 
 | 1309 | 	kblockd_flush(); | 
 | 1310 |  | 
 | 1311 | 	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); | 
 | 1312 | 	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); | 
 | 1313 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | 	put_io_context(ad->io_context); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1315 | 	kfree(ad); | 
 | 1316 | } | 
 | 1317 |  | 
 | 1318 | /* | 
| Jens Axboe | 8a8e674 | 2006-07-18 21:07:29 +0200 | [diff] [blame] | 1319 |  * initialize elevator private data (as_data). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 |  */ | 
| Jens Axboe | bb37b94 | 2006-12-01 10:42:33 +0100 | [diff] [blame] | 1321 | static void *as_init_queue(request_queue_t *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | { | 
 | 1323 | 	struct as_data *ad; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 |  | 
| Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 1325 | 	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | 	if (!ad) | 
| Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1327 | 		return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | 	memset(ad, 0, sizeof(*ad)); | 
 | 1329 |  | 
 | 1330 | 	ad->q = q; /* Identify what queue the data belongs to */ | 
 | 1331 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | 	/* anticipatory scheduling helpers */ | 
 | 1333 | 	ad->antic_timer.function = as_antic_timeout; | 
 | 1334 | 	ad->antic_timer.data = (unsigned long)q; | 
 | 1335 | 	init_timer(&ad->antic_timer); | 
| David Howells | 65f27f3 | 2006-11-22 14:55:48 +0000 | [diff] [blame] | 1336 | 	INIT_WORK(&ad->antic_work, as_work_handler); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); | 
 | 1339 | 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); | 
 | 1340 | 	ad->sort_list[REQ_SYNC] = RB_ROOT; | 
 | 1341 | 	ad->sort_list[REQ_ASYNC] = RB_ROOT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1342 | 	ad->fifo_expire[REQ_SYNC] = default_read_expire; | 
 | 1343 | 	ad->fifo_expire[REQ_ASYNC] = default_write_expire; | 
 | 1344 | 	ad->antic_expire = default_antic_expire; | 
 | 1345 | 	ad->batch_expire[REQ_SYNC] = default_read_batch_expire; | 
 | 1346 | 	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 |  | 
 | 1348 | 	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; | 
 | 1349 | 	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; | 
 | 1350 | 	if (ad->write_batch_count < 2) | 
 | 1351 | 		ad->write_batch_count = 2; | 
 | 1352 |  | 
| Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1353 | 	return ad; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | } | 
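/*
 * Editor's note: a worked example of the write_batch_count heuristic above,
 * assuming HZ=1000 so a jiffy is a millisecond (values hypothetical). A
 * 125-jiffy write batch starts at 125 / 10 = 12 requests per batch; a very
 * small batch_expire such as 15 jiffies would compute 1 and be clamped to
 * the floor of 2, so a write batch can always make some progress.
 */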
 | 1355 |  | 
 | 1356 | /* | 
 | 1357 |  * sysfs parts below | 
 | 1358 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 |  | 
 | 1360 | static ssize_t | 
 | 1361 | as_var_show(unsigned int var, char *page) | 
 | 1362 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 | 	return sprintf(page, "%u\n", var); | 
 | 1364 | } | 
 | 1365 |  | 
 | 1366 | static ssize_t | 
 | 1367 | as_var_store(unsigned long *var, const char *page, size_t count) | 
 | 1368 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | 	char *p = (char *) page; | 
 | 1370 |  | 
| Jens Axboe | c9b3ad6 | 2005-07-27 11:43:37 -0700 | [diff] [blame] | 1371 | 	*var = simple_strtoul(p, &p, 10); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | 	return count; | 
 | 1373 | } | 
 | 1374 |  | 
| Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1375 | static ssize_t est_time_show(elevator_t *e, char *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | { | 
| Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1377 | 	struct as_data *ad = e->elevator_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | 	int pos = 0; | 
 | 1379 |  | 
| Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1380 | 	pos += sprintf(page+pos, "%lu %% exit probability\n", | 
 | 1381 | 				100*ad->exit_prob/256); | 
 | 1382 | 	pos += sprintf(page+pos, "%lu %% probability of exiting without a " | 
 | 1383 | 				"cooperating process submitting IO\n", | 
 | 1384 | 				100*ad->exit_no_coop/256); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1385 | 	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean); | 
| Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1386 | 	pos += sprintf(page+pos, "%llu sectors new seek distance\n", | 
 | 1387 | 				(unsigned long long)ad->new_seek_mean); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 |  | 
 | 1389 | 	return pos; | 
 | 1390 | } | 
 | 1391 |  | 
 | 1392 | #define SHOW_FUNCTION(__FUNC, __VAR)				\ | 
| Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1393 | static ssize_t __FUNC(elevator_t *e, char *page)		\ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | {								\ | 
| Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1395 | 	struct as_data *ad = e->elevator_data;			\ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | 	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\ | 
 | 1397 | } | 
| Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1398 | SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); | 
 | 1399 | SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); | 
 | 1400 | SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); | 
 | 1401 | SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); | 
 | 1402 | SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | #undef SHOW_FUNCTION | 
 | 1404 |  | 
 | 1405 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\ | 
| Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1406 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1407 | {									\ | 
| Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1408 | 	struct as_data *ad = e->elevator_data;				\ | 
 | 1409 | 	int ret = as_var_store(__PTR, (page), count);			\ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 | 	if (*(__PTR) < (MIN))						\ | 
 | 1411 | 		*(__PTR) = (MIN);					\ | 
 | 1412 | 	else if (*(__PTR) > (MAX))					\ | 
 | 1413 | 		*(__PTR) = (MAX);					\ | 
 | 1414 | 	*(__PTR) = msecs_to_jiffies(*(__PTR));				\ | 
 | 1415 | 	return ret;							\ | 
 | 1416 | } | 
| Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1417 | STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); | 
 | 1418 | STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); | 
 | 1419 | STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); | 
 | 1420 | STORE_FUNCTION(as_read_batch_expire_store, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1421 | 			&ad->batch_expire[REQ_SYNC], 0, INT_MAX); | 
| Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1422 | STORE_FUNCTION(as_write_batch_expire_store, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | 			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX); | 
 | 1424 | #undef STORE_FUNCTION | 
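/*
 * Editor's note: for reference, STORE_FUNCTION above expands for the
 * antic_expire attribute to roughly (reformatted):
 *
 *	static ssize_t as_antic_expire_store(elevator_t *e,
 *					     const char *page, size_t count)
 *	{
 *		struct as_data *ad = e->elevator_data;
 *		int ret = as_var_store(&ad->antic_expire, page, count);
 *		if (ad->antic_expire < 0)
 *			ad->antic_expire = 0;
 *		else if (ad->antic_expire > INT_MAX)
 *			ad->antic_expire = INT_MAX;
 *		ad->antic_expire = msecs_to_jiffies(ad->antic_expire);
 *		return ret;
 *	}
 *
 * so sysfs input is parsed and clamped in milliseconds, then stored as
 * jiffies; the SHOW_FUNCTION side converts back with jiffies_to_msecs(),
 * keeping the user-visible unit consistently milliseconds.
 */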
 | 1425 |  | 
| Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1426 | #define AS_ATTR(name) \ | 
 | 1427 | 	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 |  | 
| Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1429 | static struct elv_fs_entry as_attrs[] = { | 
 | 1430 | 	__ATTR_RO(est_time), | 
 | 1431 | 	AS_ATTR(read_expire), | 
 | 1432 | 	AS_ATTR(write_expire), | 
 | 1433 | 	AS_ATTR(antic_expire), | 
 | 1434 | 	AS_ATTR(read_batch_expire), | 
 | 1435 | 	AS_ATTR(write_batch_expire), | 
 | 1436 | 	__ATTR_NULL | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | }; | 
 | 1438 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1439 | static struct elevator_type iosched_as = { | 
 | 1440 | 	.ops = { | 
 | 1441 | 		.elevator_merge_fn = 		as_merge, | 
 | 1442 | 		.elevator_merged_fn =		as_merged_request, | 
 | 1443 | 		.elevator_merge_req_fn =	as_merged_requests, | 
| Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1444 | 		.elevator_dispatch_fn =		as_dispatch_request, | 
 | 1445 | 		.elevator_add_req_fn =		as_add_request, | 
 | 1446 | 		.elevator_activate_req_fn =	as_activate_request, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1447 | 		.elevator_deactivate_req_fn = 	as_deactivate_request, | 
 | 1448 | 		.elevator_queue_empty_fn =	as_queue_empty, | 
 | 1449 | 		.elevator_completed_req_fn =	as_completed_request, | 
| Jens Axboe | e37f346 | 2006-07-18 21:06:01 +0200 | [diff] [blame] | 1450 | 		.elevator_former_req_fn =	elv_rb_former_request, | 
 | 1451 | 		.elevator_latter_req_fn =	elv_rb_latter_request, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 | 		.elevator_may_queue_fn =	as_may_queue, | 
 | 1453 | 		.elevator_init_fn =		as_init_queue, | 
 | 1454 | 		.elevator_exit_fn =		as_exit_queue, | 
| Al Viro | e17a948 | 2006-03-18 13:21:20 -0500 | [diff] [blame] | 1455 | 		.trim =				as_trim, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | 	}, | 
 | 1457 |  | 
| Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1458 | 	.elevator_attrs = as_attrs, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1459 | 	.elevator_name = "anticipatory", | 
 | 1460 | 	.elevator_owner = THIS_MODULE, | 
 | 1461 | }; | 
 | 1462 |  | 
 | 1463 | static int __init as_init(void) | 
 | 1464 | { | 
| Jens Axboe | c65fb61 | 2006-12-13 13:25:18 +0100 | [diff] [blame] | 1465 | 	return elv_register(&iosched_as); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1466 | } | 
 | 1467 |  | 
 | 1468 | static void __exit as_exit(void) | 
 | 1469 | { | 
| Peter Zijlstra | 6e9a473 | 2006-09-30 23:28:10 -0700 | [diff] [blame] | 1470 | 	DECLARE_COMPLETION_ONSTACK(all_gone); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1471 | 	elv_unregister(&iosched_as); | 
| Al Viro | 334e94d | 2006-03-18 15:05:53 -0500 | [diff] [blame] | 1472 | 	ioc_gone = &all_gone; | 
| OGAWA Hirofumi | fba8227 | 2006-04-18 09:44:06 +0200 | [diff] [blame] | 1473 | 	/* ioc_gone's update must be visible before reading ioc_count */ | 
 | 1474 | 	smp_wmb(); | 
| Jens Axboe | e4313dd | 2006-07-19 05:10:01 +0200 | [diff] [blame] | 1475 | 	if (elv_ioc_count_read(ioc_count)) | 
| OGAWA Hirofumi | fba8227 | 2006-04-18 09:44:06 +0200 | [diff] [blame] | 1476 | 		wait_for_completion(ioc_gone); | 
| Al Viro | 334e94d | 2006-03-18 15:05:53 -0500 | [diff] [blame] | 1477 | 	synchronize_rcu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1478 | } | 
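/*
 * Editor's note: the teardown above is a publish-then-check handshake with
 * the io-context release path. as_exit() publishes ioc_gone, issues
 * smp_wmb(), then reads the outstanding count; the release side decrements
 * the count before testing ioc_gone and completes it on the final put. The
 * intent is that whichever side runs second sees the other's update, so the
 * completion fires and the module is not unloaded while an io context still
 * references it; synchronize_rcu() then waits out any remaining lockless
 * readers before the module text goes away.
 */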
 | 1479 |  | 
 | 1480 | module_init(as_init); | 
 | 1481 | module_exit(as_exit); | 
 | 1482 |  | 
 | 1483 | MODULE_AUTHOR("Nick Piggin"); | 
 | 1484 | MODULE_LICENSE("GPL"); | 
 | 1485 | MODULE_DESCRIPTION("anticipatory IO scheduler"); |