/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

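/*
 * Per-extent state for one copy: the keybuf entry being moved, the
 * search/insert state machine, and a bio that is used first for the
 * read and then reinitialized for the rewrite.
 */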
struct moving_io {
	struct keybuf_key	*w;
	struct search		s;
	struct bbio		bio;
};

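/*
 * Keybuf predicate: an extent is worth moving if any of its pointers
 * lands in a bucket holding fewer live sectors than that cache's
 * gc_move_threshold, i.e. one of the buckets picked by bch_moving_gc().
 */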
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

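/* Runs after the copy has finished (or failed) to free the moving_io. */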
static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	kfree(io);
}

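/*
 * Last step of a copy: free the pages backing the bio, drop the keybuf
 * entry, and wake up anyone throttled on the in-flight count. A
 * collision means a foreground write raced with us and won.
 */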
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);

	while (bv-- != bio->bi_io_vec)
		__free_page(bv->bv_page);

	if (io->s.op.insert_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);

	atomic_dec_bug(&io->s.op.c->in_flight);
	closure_wake_up(&io->s.op.c->moving_gc_wait);

	closure_return_with_destructor(cl, moving_io_destructor);
}

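/*
 * Read completion: record any error for the write half to check;
 * bch_bbio_endio() releases the closure ref holding up write_moving().
 */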
static void read_moving_endio(struct bio *bio, int error)
{
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, s.cl);

	if (error)
		io->s.error = error;

	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
}

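/*
 * (Re)initialize the bio to cover the whole extent - called once before
 * the read and again before the write, since a completed bio can't
 * simply be resubmitted.
 */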
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->s.cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

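/*
 * Write the copied data back to the cache. The insert uses
 * BTREE_REPLACE with the original key, so it only succeeds if nothing
 * has updated the extent since we read it; if a foreground write got
 * there first, the insert fails and the stale copy is dropped (see the
 * collision check in write_moving_finish() above).
 */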
static void write_moving(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);

	if (!s->error) {
		moving_init(io);

		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
		s->op.lock		= -1;
		s->op.write_prio	= 1;
		s->op.cache_bio		= &io->bio.bio;

		s->writeback		= KEY_DIRTY(&io->w->key);
		s->op.csum		= KEY_CSUM(&io->w->key);

		s->op.type = BTREE_REPLACE;
		bkey_copy(&s->op.replace, &io->w->key);

		closure_init(&s->op.cl, cl);
		bch_insert_data(&s->op.cl);
	}

	continue_at(cl, write_moving_finish, NULL);
}

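/* Submit the read; write_moving() then runs out of the GC workqueue. */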
static void read_moving_submit(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, bch_gc_wq);
}

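/*
 * Main copy loop: rescan the btree for extents that moving_pred()
 * flags as living in mostly-empty buckets, and start a copy for each.
 * At most 64 copies are kept in flight; past that the loop parks
 * itself on moving_gc_wait until completions catch up.
 */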
static void read_moving(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->s.op.inode	= KEY_INODE(&w->key);
		io->s.op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);

		if (atomic_inc_return(&c->in_flight) >= 64) {
			closure_wait_event(&c->moving_gc_wait, cl,
					   atomic_read(&c->in_flight) < 64);
			continue_at(cl, read_moving, bch_gc_wq);
		}
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_return(cl);
}

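/*
 * Heap of candidate buckets to evacuate. With this comparison the
 * fullest candidate sits at the top, so it is the first one displaced
 * or popped when the set has to shrink.
 */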
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}

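/*
 * Decide which buckets to evacuate: collect the emptiest buckets in a
 * heap, pop the fullest ones until the data left to move fits in the
 * buckets we can allocate from (reserve_sectors), then publish the
 * cutoff as gc_move_threshold for moving_pred() to test against.
 */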
void bch_moving_gc(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		closure_return(cl);

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	closure_init(&c->moving_gc, cl);
	read_moving(&c->moving_gc);

	closure_return(cl);
}

void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
}
Kent Overstreetcafe5632013-03-23 16:11:31 -0700253}