/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <trace/events/bcache.h>

static struct workqueue_struct *dirty_wq;

static void read_dirty(struct closure *);

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

/* Rate limiting */

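/*
 * Writeback is throttled by a proportional-differential controller: the
 * target is the slice of the cache this backing device is entitled to keep
 * dirty (writeback_percent of the cache, scaled by the device's share of
 * cached_dev_sectors). The error term is the distance of the current dirty
 * count from that target; the derivative term, smoothed with an EWMA across
 * update intervals, damps oscillation as we close in on it.
 */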
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);
out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);
}

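/*
 * How long to wait, in jiffies, before issuing the next writeback IO so we
 * stay at the rate the PD controller picked; no delay at all if the device
 * is detaching or writeback is unthrottled (writeback_percent == 0).
 */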
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
}

/* Background writeback */

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

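/*
 * Scan the btree for dirty keys belonging to this backing device and refill
 * the writeback keybuf with them. If a full scan turns up nothing dirty,
 * clear has_dirty (a later pass then marks the backing device clean in its
 * superblock); any keys found are handed off to read_dirty().
 */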
static void refill_dirty(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev,
					     writeback.cl);
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = MAX_KEY;
	SET_KEY_INODE(&end, dc->disk.id);

	if (!atomic_read(&dc->disk.detaching) &&
	    !dc->writeback_running)
		closure_return(cl);

	down_write(&dc->writeback_lock);

	if (!atomic_read(&dc->has_dirty)) {
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
		bch_write_bdev_super(dc, NULL);

		up_write(&dc->writeback_lock);
		closure_return(cl);
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	bch_refill_keybuf(dc->disk.c, buf, &end);

	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
		/* Searched the entire btree - delay awhile */

		if (RB_EMPTY_ROOT(&buf->keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
		}

		if (!atomic_read(&dc->disk.detaching))
			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
	}

	up_write(&dc->writeback_lock);

	ratelimit_reset(&dc->writeback_rate);

	/* Punt to workqueue only so we don't recurse and blow the stack */
	continue_at(cl, read_dirty, dirty_wq);
}

void bch_writeback_queue(struct cached_dev *dc)
{
	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
		if (!atomic_read(&dc->disk.detaching))
			closure_delay(&dc->writeback, dc->writeback_delay * HZ);

		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
	}
}

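/*
 * Called when a write dirties data in the cache: the first such write takes
 * a ref on the cached_dev (dropped in refill_dirty() when the device goes
 * clean again), flags the backing device dirty in its superblock, and kicks
 * off the writeback closure and the periodic rate update.
 */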
void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);

		if (dc->writeback_percent)
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);
	}
}

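/*
 * Update the per-stripe dirty sector counts for the range
 * [offset, offset + nr_sectors) on device @inode. nr_sectors may be negative
 * (sectors going clean); the range can span stripe boundaries, so walk it
 * stripe by stripe.
 */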
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_size, stripe_offset;
	uint64_t stripe;

	if (!d)
		return;

	stripe_size = 1 << d->stripe_size_bits;
	stripe = offset >> d->stripe_size_bits;
	stripe_offset = offset & (stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

/* Background writeback - IO loop */

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

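/*
 * The write to the backing device has completed: free the pages allocated
 * for the IO, then clear the dirty bit in the btree with a BTREE_REPLACE
 * insert keyed on the old key, so that if the data was rewritten while we
 * were flushing it the insert collides and the key stays dirty.
 */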
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt);

	while (bv-- != io->bio.bi_io_vec)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		bch_btree_op_init_stack(&op);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&op.keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c);
		closure_sync(&op.cl);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	atomic_dec_bug(&dc->in_flight);

	closure_wake_up(&dc->writeback_wait);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_sector	= KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, dirty_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, dirty_wq);
}

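/*
 * Main writeback IO loop: pull the next dirty key off the keybuf, allocate a
 * dirty_io with enough bio_vecs for the key, read the data out of the cache,
 * and chain into write_dirty() to flush it to the backing device. Sleeps
 * between IOs according to writeback_delay() and caps in-flight IOs at 64.
 */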
static void read_dirty(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev,
					     writeback.cl);
	unsigned delay = writeback_delay(dc, 0);
	struct keybuf_key *w;
	struct dirty_io *io;

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (1) {
		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (delay > 0 &&
		    (KEY_START(&w->key) != dc->last_read ||
		     jiffies_to_msecs(delay) > 50)) {
			w->private = NULL;

			closure_delay(&dc->writeback, delay);
			continue_at(cl, read_dirty, dirty_wq);
		}

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));

		atomic_inc(&dc->in_flight);

		if (!closure_wait_event(&dc->writeback_wait, cl,
					atomic_read(&dc->in_flight) < 64))
			continue_at(cl, read_dirty, dirty_wq);
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	refill_dirty(cl);
}

/* Init */

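/*
 * Walk the btree when a backing device is attached, recursing into internal
 * nodes, and count up the dirty sectors already recorded for this device so
 * the stripe counters and the PD controller start from the right totals.
 */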
static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
					struct cached_dev *dc)
{
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
		if (!b->level) {
			if (KEY_INODE(k) > dc->disk.id)
				break;

			if (KEY_DIRTY(k))
				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
							     KEY_START(k),
							     KEY_SIZE(k));
		} else {
			btree(sectors_dirty_init, k, b, op, dc);
			if (KEY_INODE(k) > dc->disk.id)
				break;

			cond_resched();
		}

	return 0;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
}

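/*
 * Set up writeback state and default tunables for a newly registered backing
 * device, and arm the periodic rate update. The defaults below (10% dirty
 * target, 30 second update interval) can be changed later through sysfs.
 */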
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	closure_init_unlocked(&dc->writeback);
	init_rwsem(&dc->writeback_lock);

	bch_keybuf_init(&dc->writeback_keys, dirty_pred);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term	= 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth	= 8;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

void bch_writeback_exit(void)
{
	if (dirty_wq)
		destroy_workqueue(dirty_wq);
}

int __init bch_writeback_init(void)
{
	dirty_wq = create_singlethread_workqueue("bcache_writeback");
	if (!dirty_wq)
		return -ENOMEM;

	return 0;
}