/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
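
/*
 * For example, a data write carrying both REQ_FLUSH and REQ_FUA is
 * sequenced as PREFLUSH -> DATA -> POSTFLUSH on a device with a writeback
 * cache but no FUA support, as PREFLUSH -> DATA (with REQ_FUA kept on the
 * data write) on a FUA-capable device, and as a plain write on a device
 * without a writeback cache.  A data-less REQ_FLUSH request is a simple
 * cache flush.
 */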

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

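/*
 * Decide which REQ_FSEQ_* steps @rq needs.  @fflags is the queue's
 * flush_flags, i.e. what the device itself supports.  For a device that
 * advertises cache flushing, the request gets PREFLUSH if it asks for a
 * flush, DATA if it carries data, and POSTFLUSH if it wants FUA but the
 * device can't do FUA natively.
 */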
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (fflags & REQ_FLUSH) {
                if (rq->cmd_flags & REQ_FLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (blk_rq_sectors(rq))
                        policy |= REQ_FSEQ_DATA;
                if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

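/*
 * rq->flush.seq records the steps already completed; the next step is the
 * lowest bit still clear (ffz).
 */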
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->cmd_flags &= ~REQ_FLUSH_SEQ;
        rq->end_io = NULL;
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
                                   int error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
        bool queued = false;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        q->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
                list_add(&rq->queuelist, &q->queue_head);
                queued = true;
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_flush_issue() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                __blk_end_request_all(rq, error);
                break;

        default:
                BUG();
        }

        return blk_kick_flush(q) | queued;
}

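/*
 * Completion callback for q->flush_rq.  Retire the running flush queue by
 * toggling flush_running_idx, then advance every request that was waiting
 * on this flush (each must be in its PREFLUSH or POSTFLUSH step) to its
 * next step.  If that added requests to a previously empty dispatch queue,
 * kick the queue so they get issued.
 */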
static void flush_end_io(struct request *flush_rq, int error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running = &q->flush_queue[q->flush_running_idx];
        bool was_empty = elv_queue_empty(q);
        bool queued = false;
        struct request *rq, *n;

        BUG_ON(q->flush_pending_idx == q->flush_running_idx);

        /* account completion of the flush request */
        q->flush_running_idx ^= 1;
        elv_completed_request(q, flush_rq);

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                queued |= blk_flush_complete_seq(rq, seq, error);
        }

        /* after populating an empty queue, kick it to avoid stall */
        if (queued && was_empty)
                __blk_run_queue(q);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
        struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);

        /* C1 described at the top of this file */
        if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
                return false;

        /* C2 and C3 */
        if (!list_empty(&q->flush_data_in_flight) &&
            time_before(jiffies,
                        q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return false;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        blk_rq_init(q, &q->flush_rq);
        q->flush_rq.cmd_type = REQ_TYPE_FS;
        q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
        q->flush_rq.rq_disk = first_rq->rq_disk;
        q->flush_rq.end_io = flush_end_io;

        q->flush_pending_idx ^= 1;
        elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
        return true;
}

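/*
 * Completion callback for the DATA step of a sequenced request, installed
 * as rq->end_io by blk_insert_flush().  Record the step and, if advancing
 * the sequence added requests to the dispatch queue, run the queue.
 */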
static void flush_data_end_io(struct request *rq, int error)
{
        struct request_queue *q = rq->q;

        /* after populating an empty queue, kick it to avoid stall */
        if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
                __blk_run_queue(q);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned int fflags = q->flush_flags;   /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);

        BUG_ON(rq->end_io);
        BUG_ON(!rq->bio || rq->bio != rq->biotail);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_FLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_FLUSH;
        if (!(fflags & REQ_FUA))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                list_add(&rq->queuelist, &q->queue_head);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->cmd_flags |= REQ_FLUSH_SEQ;
        rq->end_io = flush_data_end_io;

        blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
        struct request *rq, *n;
        int i;

        /*
         * Requests in flight for data are already owned by the dispatch
         * queue or the device driver.  Just restore for normal completion.
         */
        list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
        }

        /*
         * We need to give away requests on flush queues.  Restore for
         * normal completion and put them on the dispatch queue.
         */
        for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
                list_for_each_entry_safe(rq, n, &q->flush_queue[i],
                                         flush.list) {
                        list_del_init(&rq->flush.list);
                        blk_flush_restore_request(rq);
                        list_add_tail(&rq->queuelist, &q->queue_head);
                }
        }
}

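/*
 * Completion for the empty flush bio submitted by blkdev_issue_flush().
 * An error clears BIO_UPTODATE so the submitter can report -EIO; the
 * waiter, if any, is woken through the completion in bi_private.
 */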
static void bio_end_flush(struct bio *bio, int err)
{
        if (err)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, the caller may only check
 *    that the request was pushed onto some internal queue for later
 *    handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * some block devices may not have their queue correctly set up here
         * (e.g. loop device without a backing file) and so issuing a flush
         * here will panic.  Ensure there is a request function before issuing
         * the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_flush;
        bio->bi_bdev = bdev;
        bio->bi_private = &wait;

        bio_get(bio);
        submit_bio(WRITE_FLUSH, bio);
        wait_for_completion(&wait);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it.  For non-stacked drivers, this should be
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_sector;

        if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
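
/*
 * Example usage (illustrative only, not part of the block layer): a caller
 * that wants to drain the device's write cache and, if the driver supports
 * it, learn where a failed flush stopped:
 *
 *	sector_t error_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector);
 *	if (err)
 *		pr_warn("cache flush failed: %d\n", err);
 *
 * Callers that don't care about the error offset can pass NULL for
 * @error_sector.
 */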