/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;
};

static void deadline_move_request(struct deadline_data *, struct request *);

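/* sector-sorted rbtree root for a request's data direction (READ or WRITE) */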
#define RQ_RB_ROOT(dd, rq)	(&(dd)->sort_list[rq_data_dir((rq))])

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

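/*
 * insert rq into the sector-sorted rbtree for its data direction. If a
 * request already exists at the same sector (an alias), dispatch the
 * alias and retry the insert.
 */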
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = RQ_RB_ROOT(dd, rq);
	struct request *__alias;

retry:
	__alias = elv_rb_add(root, rq);
	if (unlikely(__alias)) {
		deadline_move_request(dd, __alias);
		goto retry;
	}
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}

/*
 * add rq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	deadline_add_rq_rb(dd, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	rq_fifo_clear(rq);
	deadline_del_rq_rb(dd, rq);
}

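/*
 * find a request that bio may be front-merged into, using the
 * sector-sorted rbtree. Back merges are found by the generic elevator
 * code, so only the front merge case needs handling here.
 */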
static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != __rq->sector);

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}

static void deadline_merged_request(struct request_queue *q,
				    struct request *req, int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(RQ_RB_ROOT(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

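/*
 * called when req and next have been merged; next is about to be removed,
 * so its fifo bookkeeping is transferred to req before it goes away.
 */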
static void
deadline_merged_requests(struct request_queue *q, struct request *req,
			 struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	deadline_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	dd->last_sector = rq->sector + rq->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq) {
		/* we have a "next request" */

		if (dd->last_sector != rq->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}

static int deadline_queue_empty(struct request_queue *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	return list_empty(&dd->fifo_list[WRITE])
		&& list_empty(&dd->fifo_list[READ]);
}

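/*
 * free the elevator private data. Both fifo lists must already be empty,
 * i.e. every queued request has been dispatched.
 */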
static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static void *deadline_init_queue(struct request_queue *q)
{
	struct deadline_data *dd;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!dd)
		return NULL;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	return dd;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

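/*
 * SHOW_FUNCTION/STORE_FUNCTION generate one sysfs show/store pair per
 * tunable below. __CONV selects conversion between jiffies (internal)
 * and milliseconds (user visible) for the expiry values.
 */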
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

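/*
 * tunables exposed under /sys/block/<dev>/queue/iosched/ while the
 * deadline scheduler is active.
 */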
#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn = 		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
	elv_register(&iosched_deadline);

	return 0;
}

static void __exit deadline_exit(void)
{
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");