1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/sched/signal.h>
38 #include <linux/rculist.h>
39 #include <linux/delay.h>
40 #include <trace/events/bcache.h>
44 * register_bcache: Return errors out to userspace correctly
46 * Writeback: don't undirty key until after a cache flush
48 * Create an iterator for key pointers
50 * On btree write error, mark bucket such that it won't be freed from the cache
53 * Check for bad keys in replay
55 * Refcount journal entries in journal_replay
58 * Finish incremental gc
59 * Gc should free old UUIDs, data for invalid UUIDs
61 * Provide a way to list backing device UUIDs we have data cached for, and
62 * probably how long it's been since we've seen them, and a way to invalidate
63 * dirty data for devices that will never be attached again
65 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
66 * that based on that and how much dirty data we have we can keep writeback
69 * Add a tracepoint or somesuch to watch for writeback starvation
71 * When btree depth > 1 and splitting an interior node, we have to make sure
72 * alloc_bucket() cannot fail. This should be true but is not completely
77 * If data write is less than hard sector size of ssd, round up offset in open
78 * bucket to the next whole sector
80 * Superblock needs to be fleshed out for multiple cache devices
82 * Add a sysfs tunable for the number of writeback IOs in flight
84 * Add a sysfs tunable for the number of open data buckets
86 * IO tracking: Can we track when one process is doing io on behalf of another?
87 * IO tracking: Don't use just an average, weigh more recent stuff higher
89 * Test module load/unload
92 #define MAX_NEED_GC 64
93 #define MAX_SAVE_PRIO 72
94 #define MAX_GC_TIMES 100
95 #define MIN_GC_NODES 100
96 #define GC_SLEEP_MS 100
98 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
100 #define PTR_HASH(c, k) \
101 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
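/*
 * insert_lock() - true if we should hold a write lock on node b, i.e. the
 * node's level is at or below the depth given by op->lock.
 */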
103 #define insert_lock(s, b) ((b)->level <= (s)->lock)
106 * These macros are for recursing down the btree - they handle the details of
107 * locking and looking up nodes in the cache for you. They're best treated as
108 * mere syntax when reading code that uses them.
110 * op->lock determines whether we take a read or a write lock at a given depth.
111 * If you've got a read lock and find that you need a write lock (i.e. you're
112 * going to have to split), set op->lock and return -EINTR; btree_root() will
113 * call you again and you'll have the correct lock.
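 *
 * For a concrete example of this pattern, see bch_btree_map_nodes_recurse()
 * below, which recurses with btree(map_nodes_recurse, k, b, op, from, fn,
 * flags), and __bch_btree_map_nodes(), which starts the walk from the root
 * with btree_root(map_nodes_recurse, c, op, from, fn, flags).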
117 * btree - recurse down the btree on a specified key
118 * @fn: function to call, which will be passed the child node
119 * @key: key to recurse on
120 * @b: parent btree node
121 * @op: pointer to struct btree_op
123 #define btree(fn, key, b, op, ...) \
125 int _r, l = (b)->level - 1; \
126 bool _w = l <= (op)->lock; \
127 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
129 if (!IS_ERR(_child)) { \
130 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
131 rw_unlock(_w, _child); \
133 _r = PTR_ERR(_child); \
138 * btree_root - call a function on the root of the btree
139 * @fn: function to call, which will be passed the root node
141 * @op: pointer to struct btree_op
143 #define btree_root(fn, c, op, ...) \
147 struct btree *_b = (c)->root; \
148 bool _w = insert_lock(op, _b); \
149 rw_lock(_w, _b, _b->level); \
150 if (_b == (c)->root && \
151 _w == insert_lock(op, _b)) { \
152 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
155 bch_cannibalize_unlock(c); \
158 } while (_r == -EINTR); \
160 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
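/*
 * write_block() - return the first block of the node not yet written to
 * disk, i.e. where the next bset will be laid out.
 */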
164 static inline struct bset *write_block(struct btree *b)
166 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
169 static void bch_btree_init_next(struct btree *b)
171 /* If not a leaf node, always sort */
172 if (b->level && b->keys.nsets)
173 bch_btree_sort(&b->keys, &b->c->sort);
175 bch_btree_sort_lazy(&b->keys, &b->c->sort);
177 if (b->written < btree_blocks(b))
178 bch_bset_init_next(&b->keys, write_block(b),
179 bset_magic(&b->c->sb));
183 /* Btree key manipulation */
185 void bkey_put(struct cache_set *c, struct bkey *k)
189 for (i = 0; i < KEY_PTRS(k); i++)
190 if (ptr_available(c, k, i))
191 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
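/*
 * Checksum a bset: seeded with the node's first pointer, covering everything
 * in the bset after the csum field itself.
 */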
196 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
198 uint64_t crc = b->key.ptr[0];
199 void *data = (void *) i + 8, *end = bset_bkey_last(i);
201 crc = bch_crc64_update(crc, data, end - data);
202 return crc ^ 0xffffffffffffffffULL;
205 void bch_btree_node_read_done(struct btree *b)
207 const char *err = "bad btree header";
208 struct bset *i = btree_bset_first(b);
209 struct btree_iter *iter;
212 * c->fill_iter can allocate an iterator with more memory space
213 * than static MAX_BSETS.
214 * See the comment around cache_set->fill_iter.
216 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
217 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
220 #ifdef CONFIG_BCACHE_DEBUG
228 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
229 i = write_block(b)) {
230 err = "unsupported bset version";
231 if (i->version > BCACHE_BSET_VERSION)
234 err = "bad btree header";
235 if (b->written + set_blocks(i, block_bytes(b->c)) >
240 if (i->magic != bset_magic(&b->c->sb))
243 err = "bad checksum";
244 switch (i->version) {
246 if (i->csum != csum_set(i))
249 case BCACHE_BSET_VERSION:
250 if (i->csum != btree_csum_set(b, i))
256 if (i != b->keys.set[0].data && !i->keys)
259 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
261 b->written += set_blocks(i, block_bytes(b->c));
264 err = "corrupted btree";
265 for (i = write_block(b);
266 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
267 i = ((void *) i) + block_bytes(b->c))
268 if (i->seq == b->keys.set[0].data->seq)
271 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
273 i = b->keys.set[0].data;
274 err = "short btree key";
275 if (b->keys.set[0].size &&
276 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
279 if (b->written < btree_blocks(b))
280 bch_bset_init_next(&b->keys, write_block(b),
281 bset_magic(&b->c->sb));
283 mempool_free(iter, &b->c->fill_iter);
286 set_btree_node_io_error(b);
287 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
288 err, PTR_BUCKET_NR(b->c, &b->key, 0),
289 bset_block_offset(b, i), i->keys);
293 static void btree_node_read_endio(struct bio *bio)
295 struct closure *cl = bio->bi_private;
300 static void bch_btree_node_read(struct btree *b)
302 uint64_t start_time = local_clock();
306 trace_bcache_btree_read(b);
308 closure_init_stack(&cl);
310 bio = bch_bbio_alloc(b->c);
311 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
312 bio->bi_end_io = btree_node_read_endio;
313 bio->bi_private = &cl;
314 bio->bi_opf = REQ_OP_READ | REQ_META;
316 bch_bio_map(bio, b->keys.set[0].data);
318 bch_submit_bbio(bio, b->c, &b->key, 0);
322 set_btree_node_io_error(b);
324 bch_bbio_free(bio, b->c);
326 if (btree_node_io_error(b))
329 bch_btree_node_read_done(b);
330 bch_time_stats_update(&b->c->btree_read_time, start_time);
334 bch_cache_set_error(b->c, "io error reading bucket %zu",
335 PTR_BUCKET_NR(b->c, &b->key, 0));
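/*
 * btree_complete_write() - release resources pinned by a completed btree
 * node write: drop the prio_blocked count (waking the allocators when it
 * hits zero) and drop the journal pin taken in bch_btree_leaf_dirty().
 */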
338 static void btree_complete_write(struct btree *b, struct btree_write *w)
340 if (w->prio_blocked &&
341 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
342 wake_up_allocators(b->c);
345 atomic_dec_bug(w->journal);
346 __closure_wake_up(&b->c->journal.wait);
353 static void btree_node_write_unlock(struct closure *cl)
355 struct btree *b = container_of(cl, struct btree, io);
360 static void __btree_node_write_done(struct closure *cl)
362 struct btree *b = container_of(cl, struct btree, io);
363 struct btree_write *w = btree_prev_write(b);
365 bch_bbio_free(b->bio, b->c);
367 btree_complete_write(b, w);
369 if (btree_node_dirty(b))
370 schedule_delayed_work(&b->work, 30 * HZ);
372 closure_return_with_destructor(cl, btree_node_write_unlock);
375 static void btree_node_write_done(struct closure *cl)
377 struct btree *b = container_of(cl, struct btree, io);
379 bio_free_pages(b->bio);
380 __btree_node_write_done(cl);
383 static void btree_node_write_endio(struct bio *bio)
385 struct closure *cl = bio->bi_private;
386 struct btree *b = container_of(cl, struct btree, io);
389 set_btree_node_io_error(b);
391 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
395 static void do_btree_node_write(struct btree *b)
397 struct closure *cl = &b->io;
398 struct bset *i = btree_bset_last(b);
401 i->version = BCACHE_BSET_VERSION;
402 i->csum = btree_csum_set(b, i);
405 b->bio = bch_bbio_alloc(b->c);
407 b->bio->bi_end_io = btree_node_write_endio;
408 b->bio->bi_private = cl;
409 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
410 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
411 bch_bio_map(b->bio, i);
414 * If we're appending to a leaf node, we don't technically need FUA -
415 * this write just needs to be persisted before the next journal write,
416 * which will be marked FLUSH|FUA.
418 * Similarly if we're writing a new btree root - the pointer is going to
419 * be in the next journal entry.
421 * But if we're writing a new btree node (that isn't a root) or
422 * appending to a non leaf btree node, we need either FUA or a flush
423 * when we write the parent with the new pointer. FUA is cheaper than a
424 * flush, and writes appending to leaf nodes aren't blocking anything so
425 * just make all btree node writes FUA to keep things sane.
428 bkey_copy(&k.key, &b->key);
429 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
430 bset_sector_offset(&b->keys, i));
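	/*
	 * Prefer writing from a bounce buffer: if the GFP_NOWAIT page
	 * allocation succeeds, copy the bset into the newly allocated pages
	 * and submit; otherwise fall back to mapping the bset memory
	 * directly with bch_bio_map() before submitting.
	 */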
432 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
434 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
435 struct bvec_iter_all iter_all;
437 bio_for_each_segment_all(bv, b->bio, iter_all) {
438 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
442 bch_submit_bbio(b->bio, b->c, &k.key, 0);
444 continue_at(cl, btree_node_write_done, NULL);
447 * No problem for multipage bvec since the bio is
451 bch_bio_map(b->bio, i);
453 bch_submit_bbio(b->bio, b->c, &k.key, 0);
456 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
460 void __bch_btree_node_write(struct btree *b, struct closure *parent)
462 struct bset *i = btree_bset_last(b);
464 lockdep_assert_held(&b->write_lock);
466 trace_bcache_btree_write(b);
468 BUG_ON(current->bio_list);
469 BUG_ON(b->written >= btree_blocks(b));
470 BUG_ON(b->written && !i->keys);
471 BUG_ON(btree_bset_first(b)->seq != i->seq);
472 bch_check_keys(&b->keys, "writing");
474 cancel_delayed_work(&b->work);
476 /* If caller isn't waiting for write, parent refcount is cache set */
478 closure_init(&b->io, parent ?: &b->c->cl);
480 clear_bit(BTREE_NODE_dirty, &b->flags);
481 change_bit(BTREE_NODE_write_idx, &b->flags);
483 do_btree_node_write(b);
485 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
486 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
488 b->written += set_blocks(i, block_bytes(b->c));
491 void bch_btree_node_write(struct btree *b, struct closure *parent)
493 unsigned int nsets = b->keys.nsets;
495 lockdep_assert_held(&b->lock);
497 __bch_btree_node_write(b, parent);
500 * do verify if there was more than one set initially (i.e. we did a
501 * sort) and we sorted down to a single set:
503 if (nsets && !b->keys.nsets)
506 bch_btree_init_next(b);
509 static void bch_btree_node_write_sync(struct btree *b)
513 closure_init_stack(&cl);
515 mutex_lock(&b->write_lock);
516 bch_btree_node_write(b, &cl);
517 mutex_unlock(&b->write_lock);
522 static void btree_node_write_work(struct work_struct *w)
524 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
526 mutex_lock(&b->write_lock);
527 if (btree_node_dirty(b))
528 __bch_btree_node_write(b, NULL);
529 mutex_unlock(&b->write_lock);
532 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
534 struct bset *i = btree_bset_last(b);
535 struct btree_write *w = btree_current_write(b);
537 lockdep_assert_held(&b->write_lock);
542 if (!btree_node_dirty(b))
543 schedule_delayed_work(&b->work, 30 * HZ);
545 set_btree_node_dirty(b);
548 * w->journal is always the oldest journal pin of all bkeys
549 * in the leaf node, to make sure the oldest jset seq won't
550 * be increased before this btree node is flushed.
554 journal_pin_cmp(b->c, w->journal, journal_ref)) {
555 atomic_dec_bug(w->journal);
560 w->journal = journal_ref;
561 atomic_inc(w->journal);
565 /* Force write if set is too big */
566 if (set_bytes(i) > PAGE_SIZE - 48 &&
568 bch_btree_node_write(b, NULL);
572 * Btree in memory cache - allocation/freeing
573 * mca -> memory cache
576 #define mca_reserve(c) (((c->root && c->root->level) \
577 ? c->root->level : 1) * 8 + 16)
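/* Number of cached nodes beyond the reserve that the shrinker may free */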
578 #define mca_can_free(c) \
579 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
581 static void mca_data_free(struct btree *b)
583 BUG_ON(b->io_mutex.count != 1);
585 bch_btree_keys_free(&b->keys);
587 b->c->btree_cache_used--;
588 list_move(&b->list, &b->c->btree_cache_freed);
591 static void mca_bucket_free(struct btree *b)
593 BUG_ON(btree_node_dirty(b));
596 hlist_del_init_rcu(&b->hash);
597 list_move(&b->list, &b->c->btree_cache_freeable);
600 static unsigned int btree_order(struct bkey *k)
602 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
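/*
 * mca_data_alloc() - allocate the in-memory bset storage for a node. On
 * success the node moves to the btree_cache list; if allocation fails it is
 * parked on btree_cache_freed so the struct btree can be reused later.
 */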
605 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
607 if (!bch_btree_keys_alloc(&b->keys,
609 ilog2(b->c->btree_pages),
612 b->c->btree_cache_used++;
613 list_move(&b->list, &b->c->btree_cache);
615 list_move(&b->list, &b->c->btree_cache_freed);
619 static struct btree *mca_bucket_alloc(struct cache_set *c,
620 struct bkey *k, gfp_t gfp)
623 * kzalloc() is necessary here for initialization,
624 * see code comments in bch_btree_keys_init().
626 struct btree *b = kzalloc(sizeof(struct btree), gfp);
631 init_rwsem(&b->lock);
632 lockdep_set_novalidate_class(&b->lock);
633 mutex_init(&b->write_lock);
634 lockdep_set_novalidate_class(&b->write_lock);
635 INIT_LIST_HEAD(&b->list);
636 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
638 sema_init(&b->io_mutex, 1);
640 mca_data_alloc(b, k, gfp);
644 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
648 closure_init_stack(&cl);
649 lockdep_assert_held(&b->c->bucket_lock);
651 if (!down_write_trylock(&b->lock))
654 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
656 if (b->keys.page_order < min_order)
660 if (btree_node_dirty(b))
663 if (down_trylock(&b->io_mutex))
670 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
671 * __bch_btree_node_write(). To avoid an extra flush, acquire
672 * b->write_lock before checking the BTREE_NODE_dirty bit.
674 mutex_lock(&b->write_lock);
676 * If this btree node is selected in btree_flush_write() by journal
677 * code, delay and retry until the node is flushed by journal code
678 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
680 if (btree_node_journal_flush(b)) {
681 pr_debug("bnode %p is flushing by journal, retry", b);
682 mutex_unlock(&b->write_lock);
687 if (btree_node_dirty(b))
688 __bch_btree_node_write(b, &cl);
689 mutex_unlock(&b->write_lock);
693 /* wait for any in flight btree write */
703 static unsigned long bch_mca_scan(struct shrinker *shrink,
704 struct shrink_control *sc)
706 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
708 unsigned long i, nr = sc->nr_to_scan;
709 unsigned long freed = 0;
710 unsigned int btree_cache_used;
712 if (c->shrinker_disabled)
715 if (c->btree_cache_alloc_lock)
718 /* Return -1 if we can't do anything right now */
719 if (sc->gfp_mask & __GFP_IO)
720 mutex_lock(&c->bucket_lock);
721 else if (!mutex_trylock(&c->bucket_lock))
725 * It's _really_ critical that we don't free too many btree nodes - we
726 * have to always leave ourselves a reserve. The reserve is how we
727 * guarantee that allocating memory for a new btree node can always
728 * succeed, so that inserting keys into the btree can always succeed and
729 * IO can always make forward progress:
731 nr /= c->btree_pages;
734 nr = min_t(unsigned long, nr, mca_can_free(c));
737 btree_cache_used = c->btree_cache_used;
738 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
742 if (!mca_reap(b, 0, false)) {
751 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
752 if (nr <= 0 || i >= btree_cache_used)
755 if (!mca_reap(b, 0, false)) {
766 mutex_unlock(&c->bucket_lock);
767 return freed * c->btree_pages;
770 static unsigned long bch_mca_count(struct shrinker *shrink,
771 struct shrink_control *sc)
773 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
775 if (c->shrinker_disabled)
778 if (c->btree_cache_alloc_lock)
781 return mca_can_free(c) * c->btree_pages;
784 void bch_btree_cache_free(struct cache_set *c)
789 closure_init_stack(&cl);
791 if (c->shrink.list.next)
792 unregister_shrinker(&c->shrink);
794 mutex_lock(&c->bucket_lock);
796 #ifdef CONFIG_BCACHE_DEBUG
798 list_move(&c->verify_data->list, &c->btree_cache);
800 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
803 list_splice(&c->btree_cache_freeable,
806 while (!list_empty(&c->btree_cache)) {
807 b = list_first_entry(&c->btree_cache, struct btree, list);
810 * This function is called by cache_set_free(); there is no
811 * I/O on the cache at this point, so it is unnecessary to
812 * acquire b->write_lock before clearing BTREE_NODE_dirty.
814 if (btree_node_dirty(b)) {
815 btree_complete_write(b, btree_current_write(b));
816 clear_bit(BTREE_NODE_dirty, &b->flags);
821 while (!list_empty(&c->btree_cache_freed)) {
822 b = list_first_entry(&c->btree_cache_freed,
825 cancel_delayed_work_sync(&b->work);
829 mutex_unlock(&c->bucket_lock);
832 int bch_btree_cache_alloc(struct cache_set *c)
836 for (i = 0; i < mca_reserve(c); i++)
837 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
840 list_splice_init(&c->btree_cache,
841 &c->btree_cache_freeable);
843 #ifdef CONFIG_BCACHE_DEBUG
844 mutex_init(&c->verify_lock);
846 c->verify_ondisk = (void *)
847 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
849 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
851 if (c->verify_data &&
852 c->verify_data->keys.set->data)
853 list_del_init(&c->verify_data->list);
855 c->verify_data = NULL;
858 c->shrink.count_objects = bch_mca_count;
859 c->shrink.scan_objects = bch_mca_scan;
861 c->shrink.batch = c->btree_pages * 2;
863 if (register_shrinker(&c->shrink))
864 pr_warn("bcache: %s: could not register shrinker",
870 /* Btree in memory cache - hash table */
872 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
874 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
877 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
882 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
883 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
891 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
893 spin_lock(&c->btree_cannibalize_lock);
894 if (likely(c->btree_cache_alloc_lock == NULL)) {
895 c->btree_cache_alloc_lock = current;
896 } else if (c->btree_cache_alloc_lock != current) {
898 prepare_to_wait(&c->btree_cache_wait, &op->wait,
899 TASK_UNINTERRUPTIBLE);
900 spin_unlock(&c->btree_cannibalize_lock);
903 spin_unlock(&c->btree_cannibalize_lock);
908 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
913 trace_bcache_btree_cache_cannibalize(c);
915 if (mca_cannibalize_lock(c, op))
916 return ERR_PTR(-EINTR);
918 list_for_each_entry_reverse(b, &c->btree_cache, list)
919 if (!mca_reap(b, btree_order(k), false))
922 list_for_each_entry_reverse(b, &c->btree_cache, list)
923 if (!mca_reap(b, btree_order(k), true))
926 WARN(1, "btree cache cannibalize failed\n");
927 return ERR_PTR(-ENOMEM);
931 * We can only have one thread cannibalizing other cached btree nodes at a time,
932 * or we'll deadlock. We use an open coded mutex to ensure that, which
933 * mca_cannibalize_lock() takes. This means every time we unlock the root of
934 * the btree, we need to release this lock if we have it held.
936 static void bch_cannibalize_unlock(struct cache_set *c)
938 spin_lock(&c->btree_cannibalize_lock);
939 if (c->btree_cache_alloc_lock == current) {
940 c->btree_cache_alloc_lock = NULL;
941 wake_up(&c->btree_cache_wait);
943 spin_unlock(&c->btree_cannibalize_lock);
946 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
947 struct bkey *k, int level)
951 BUG_ON(current->bio_list);
953 lockdep_assert_held(&c->bucket_lock);
958 /* btree_free() doesn't free memory; it sticks the node on the end of
959 * the list. Check if there are any freed nodes there:
961 list_for_each_entry(b, &c->btree_cache_freeable, list)
962 if (!mca_reap(b, btree_order(k), false))
965 /* We never free struct btree itself, just the memory that holds the on
966 * disk node. Check the freed list before allocating a new one:
968 list_for_each_entry(b, &c->btree_cache_freed, list)
969 if (!mca_reap(b, 0, false)) {
970 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
971 if (!b->keys.set[0].data)
977 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
981 BUG_ON(!down_write_trylock(&b->lock));
982 if (!b->keys.set->data)
985 BUG_ON(b->io_mutex.count != 1);
987 bkey_copy(&b->key, k);
988 list_move(&b->list, &c->btree_cache);
989 hlist_del_init_rcu(&b->hash);
990 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
992 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
993 b->parent = (void *) ~0UL;
999 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
1000 &b->c->expensive_debug_checks);
1002 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
1003 &b->c->expensive_debug_checks);
1010 b = mca_cannibalize(c, op, k);
1018 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
1019 * in from disk if necessary.
1021 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
1023 * The btree node will have either a read or a write lock held, depending on
1024 * level and op->lock.
1026 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1027 struct bkey *k, int level, bool write,
1028 struct btree *parent)
1038 if (current->bio_list)
1039 return ERR_PTR(-EAGAIN);
1041 mutex_lock(&c->bucket_lock);
1042 b = mca_alloc(c, op, k, level);
1043 mutex_unlock(&c->bucket_lock);
1050 bch_btree_node_read(b);
1053 downgrade_write(&b->lock);
1055 rw_lock(write, b, level);
1056 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1057 rw_unlock(write, b);
1060 BUG_ON(b->level != level);
1063 if (btree_node_io_error(b)) {
1064 rw_unlock(write, b);
1065 return ERR_PTR(-EIO);
1068 BUG_ON(!b->written);
1072 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1073 prefetch(b->keys.set[i].tree);
1074 prefetch(b->keys.set[i].data);
1077 for (; i <= b->keys.nsets; i++)
1078 prefetch(b->keys.set[i].data);
1083 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1087 mutex_lock(&parent->c->bucket_lock);
1088 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1089 mutex_unlock(&parent->c->bucket_lock);
1091 if (!IS_ERR_OR_NULL(b)) {
1093 bch_btree_node_read(b);
1100 static void btree_node_free(struct btree *b)
1102 trace_bcache_btree_node_free(b);
1104 BUG_ON(b == b->c->root);
1107 mutex_lock(&b->write_lock);
1109 * If the btree node is selected and being flushed in btree_flush_write(),
1110 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1111 * only then is it safe to free the btree node here. Otherwise freeing
1112 * it would race with the journal flush.
1114 if (btree_node_journal_flush(b)) {
1115 mutex_unlock(&b->write_lock);
1116 pr_debug("bnode %p journal_flush set, retry", b);
1121 if (btree_node_dirty(b)) {
1122 btree_complete_write(b, btree_current_write(b));
1123 clear_bit(BTREE_NODE_dirty, &b->flags);
1126 mutex_unlock(&b->write_lock);
1128 cancel_delayed_work(&b->work);
1130 mutex_lock(&b->c->bucket_lock);
1131 bch_bucket_free(b->c, &b->key);
1133 mutex_unlock(&b->c->bucket_lock);
1136 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1137 int level, bool wait,
1138 struct btree *parent)
1141 struct btree *b = ERR_PTR(-EAGAIN);
1143 mutex_lock(&c->bucket_lock);
1145 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1148 bkey_put(c, &k.key);
1149 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1151 b = mca_alloc(c, op, &k.key, level);
1157 "Tried to allocate bucket that was in btree cache");
1162 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1164 mutex_unlock(&c->bucket_lock);
1166 trace_bcache_btree_node_alloc(b);
1169 bch_bucket_free(c, &k.key);
1171 mutex_unlock(&c->bucket_lock);
1173 trace_bcache_btree_node_alloc_fail(c);
1177 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1178 struct btree_op *op, int level,
1179 struct btree *parent)
1181 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1184 static struct btree *btree_node_alloc_replacement(struct btree *b,
1185 struct btree_op *op)
1187 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1189 if (!IS_ERR_OR_NULL(n)) {
1190 mutex_lock(&n->write_lock);
1191 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1192 bkey_copy_key(&n->key, &b->key);
1193 mutex_unlock(&n->write_lock);
1199 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1203 mutex_lock(&b->c->bucket_lock);
1205 atomic_inc(&b->c->prio_blocked);
1207 bkey_copy(k, &b->key);
1208 bkey_copy_key(k, &ZERO_KEY);
1210 for (i = 0; i < KEY_PTRS(k); i++)
1212 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1213 PTR_BUCKET(b->c, &b->key, i)));
1215 mutex_unlock(&b->c->bucket_lock);
1218 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1220 struct cache_set *c = b->c;
1222 unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1224 mutex_lock(&c->bucket_lock);
1226 for_each_cache(ca, c, i)
1227 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1229 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1230 TASK_UNINTERRUPTIBLE);
1231 mutex_unlock(&c->bucket_lock);
1235 mutex_unlock(&c->bucket_lock);
1237 return mca_cannibalize_lock(b->c, op);
1240 /* Garbage collection */
1242 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1250 * ptr_invalid() can't return true for the keys that mark btree nodes as
1251 * freed, but since ptr_bad() returns true we'll never actually use them
1252 * for anything and thus we don't want to mark their pointers here
1254 if (!bkey_cmp(k, &ZERO_KEY))
1257 for (i = 0; i < KEY_PTRS(k); i++) {
1258 if (!ptr_available(c, k, i))
1261 g = PTR_BUCKET(c, k, i);
1263 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1264 g->last_gc = PTR_GEN(k, i);
1266 if (ptr_stale(c, k, i)) {
1267 stale = max(stale, ptr_stale(c, k, i));
1271 cache_bug_on(GC_MARK(g) &&
1272 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1273 c, "inconsistent ptrs: mark = %llu, level = %i",
1277 SET_GC_MARK(g, GC_MARK_METADATA);
1278 else if (KEY_DIRTY(k))
1279 SET_GC_MARK(g, GC_MARK_DIRTY);
1280 else if (!GC_MARK(g))
1281 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1283 /* guard against overflow */
1284 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1285 GC_SECTORS_USED(g) + KEY_SIZE(k),
1286 MAX_GC_SECTORS_USED));
1288 BUG_ON(!GC_SECTORS_USED(g));
1294 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1296 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1300 for (i = 0; i < KEY_PTRS(k); i++)
1301 if (ptr_available(c, k, i) &&
1302 !ptr_stale(c, k, i)) {
1303 struct bucket *b = PTR_BUCKET(c, k, i);
1305 b->gen = PTR_GEN(k, i);
1307 if (level && bkey_cmp(k, &ZERO_KEY))
1308 b->prio = BTREE_PRIO;
1309 else if (!level && b->prio == BTREE_PRIO)
1310 b->prio = INITIAL_PRIO;
1313 __bch_btree_mark_key(c, level, k);
1316 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1318 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1321 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1324 unsigned int keys = 0, good_keys = 0;
1326 struct btree_iter iter;
1327 struct bset_tree *t;
1331 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1332 stale = max(stale, btree_mark_key(b, k));
1335 if (bch_ptr_bad(&b->keys, k))
1338 gc->key_bytes += bkey_u64s(k);
1342 gc->data += KEY_SIZE(k);
1345 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1346 btree_bug_on(t->size &&
1347 bset_written(&b->keys, t) &&
1348 bkey_cmp(&b->key, &t->end) < 0,
1349 b, "found short btree key in gc");
1351 if (b->c->gc_always_rewrite)
1357 if ((keys - good_keys) * 2 > keys)
1363 #define GC_MERGE_NODES 4U
1365 struct gc_merge_info {
1370 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1371 struct keylist *insert_keys,
1372 atomic_t *journal_ref,
1373 struct bkey *replace_key);
1375 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1376 struct gc_stat *gc, struct gc_merge_info *r)
1378 unsigned int i, nodes = 0, keys = 0, blocks;
1379 struct btree *new_nodes[GC_MERGE_NODES];
1380 struct keylist keylist;
1384 bch_keylist_init(&keylist);
1386 if (btree_check_reserve(b, NULL))
1389 memset(new_nodes, 0, sizeof(new_nodes));
1390 closure_init_stack(&cl);
1392 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1393 keys += r[nodes++].keys;
1395 blocks = btree_default_blocks(b->c) * 2 / 3;
1398 __set_blocks(b->keys.set[0].data, keys,
1399 block_bytes(b->c)) > blocks * (nodes - 1))
1402 for (i = 0; i < nodes; i++) {
1403 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1404 if (IS_ERR_OR_NULL(new_nodes[i]))
1405 goto out_nocoalesce;
1409 * We have to check the reserve here, after we've allocated our new
1410 * nodes, to make sure the insert below will succeed - we also check
1411 * before as an optimization to potentially avoid a bunch of expensive
1414 if (btree_check_reserve(b, NULL))
1415 goto out_nocoalesce;
1417 for (i = 0; i < nodes; i++)
1418 mutex_lock(&new_nodes[i]->write_lock);
1420 for (i = nodes - 1; i > 0; --i) {
1421 struct bset *n1 = btree_bset_first(new_nodes[i]);
1422 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1423 struct bkey *k, *last = NULL;
1429 k < bset_bkey_last(n2);
1431 if (__set_blocks(n1, n1->keys + keys +
1433 block_bytes(b->c)) > blocks)
1437 keys += bkey_u64s(k);
1441 * Last node we're not getting rid of - we're getting
1442 * rid of the node at r[0]. Have to try and fit all of
1443 * the remaining keys into this node; we can't ensure
1444 * they will always fit due to rounding and variable
1445 * length keys (shouldn't be possible in practice,
1448 if (__set_blocks(n1, n1->keys + n2->keys,
1449 block_bytes(b->c)) >
1450 btree_blocks(new_nodes[i]))
1451 goto out_nocoalesce;
1454 /* Take the key of the node we're getting rid of */
1458 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1459 btree_blocks(new_nodes[i]));
1462 bkey_copy_key(&new_nodes[i]->key, last);
1464 memcpy(bset_bkey_last(n1),
1466 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1469 r[i].keys = n1->keys;
1472 bset_bkey_idx(n2, keys),
1473 (void *) bset_bkey_last(n2) -
1474 (void *) bset_bkey_idx(n2, keys));
1478 if (__bch_keylist_realloc(&keylist,
1479 bkey_u64s(&new_nodes[i]->key)))
1480 goto out_nocoalesce;
1482 bch_btree_node_write(new_nodes[i], &cl);
1483 bch_keylist_add(&keylist, &new_nodes[i]->key);
1486 for (i = 0; i < nodes; i++)
1487 mutex_unlock(&new_nodes[i]->write_lock);
1491 /* We emptied out this node */
1492 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1493 btree_node_free(new_nodes[0]);
1494 rw_unlock(true, new_nodes[0]);
1495 new_nodes[0] = NULL;
1497 for (i = 0; i < nodes; i++) {
1498 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1499 goto out_nocoalesce;
1501 make_btree_freeing_key(r[i].b, keylist.top);
1502 bch_keylist_push(&keylist);
1505 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1506 BUG_ON(!bch_keylist_empty(&keylist));
1508 for (i = 0; i < nodes; i++) {
1509 btree_node_free(r[i].b);
1510 rw_unlock(true, r[i].b);
1512 r[i].b = new_nodes[i];
1515 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1516 r[nodes - 1].b = ERR_PTR(-EINTR);
1518 trace_bcache_btree_gc_coalesce(nodes);
1521 bch_keylist_free(&keylist);
1523 /* Invalidated our iterator */
1529 while ((k = bch_keylist_pop(&keylist)))
1530 if (!bkey_cmp(k, &ZERO_KEY))
1531 atomic_dec(&b->c->prio_blocked);
1532 bch_keylist_free(&keylist);
1534 for (i = 0; i < nodes; i++)
1535 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1536 btree_node_free(new_nodes[i]);
1537 rw_unlock(true, new_nodes[i]);
1542 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1543 struct btree *replace)
1545 struct keylist keys;
1548 if (btree_check_reserve(b, NULL))
1551 n = btree_node_alloc_replacement(replace, NULL);
1553 /* recheck reserve after allocating replacement node */
1554 if (btree_check_reserve(b, NULL)) {
1560 bch_btree_node_write_sync(n);
1562 bch_keylist_init(&keys);
1563 bch_keylist_add(&keys, &n->key);
1565 make_btree_freeing_key(replace, keys.top);
1566 bch_keylist_push(&keys);
1568 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1569 BUG_ON(!bch_keylist_empty(&keys));
1571 btree_node_free(replace);
1574 /* Invalidated our iterator */
1578 static unsigned int btree_gc_count_keys(struct btree *b)
1581 struct btree_iter iter;
1582 unsigned int ret = 0;
1584 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1585 ret += bkey_u64s(k);
1590 static size_t btree_gc_min_nodes(struct cache_set *c)
1595 * Incremental GC pauses for 100ms whenever front side I/O
1596 * arrives, so on a btree with many nodes, if GC only
1597 * processed a constant number (100) of nodes per step it
1598 * would take a very long time, and the front side I/Os
1599 * would run out of buckets (since no new bucket can be
1600 * allocated during GC) and be blocked again. So instead of
1601 * a constant, the number of nodes processed per step is
1602 * scaled to the total number of btree nodes by dividing GC
1603 * into a constant number (100) of steps: with many btree
1604 * nodes each step processes more nodes, with few it
1605 * processes fewer, but never less than MIN_GC_NODES.
1608 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1609 if (min_nodes < MIN_GC_NODES)
1610 min_nodes = MIN_GC_NODES;
1616 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1617 struct closure *writes, struct gc_stat *gc)
1620 bool should_rewrite;
1622 struct btree_iter iter;
1623 struct gc_merge_info r[GC_MERGE_NODES];
1624 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1626 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1628 for (i = r; i < r + ARRAY_SIZE(r); i++)
1629 i->b = ERR_PTR(-EINTR);
1632 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1634 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1637 ret = PTR_ERR(r->b);
1641 r->keys = btree_gc_count_keys(r->b);
1643 ret = btree_gc_coalesce(b, op, gc, r);
1651 if (!IS_ERR(last->b)) {
1652 should_rewrite = btree_gc_mark_node(last->b, gc);
1653 if (should_rewrite) {
1654 ret = btree_gc_rewrite_node(b, op, last->b);
1659 if (last->b->level) {
1660 ret = btree_gc_recurse(last->b, op, writes, gc);
1665 bkey_copy_key(&b->c->gc_done, &last->b->key);
1668 * Must flush leaf nodes before gc ends, since replace
1669 * operations aren't journalled
1671 mutex_lock(&last->b->write_lock);
1672 if (btree_node_dirty(last->b))
1673 bch_btree_node_write(last->b, writes);
1674 mutex_unlock(&last->b->write_lock);
1675 rw_unlock(true, last->b);
1678 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1681 if (atomic_read(&b->c->search_inflight) &&
1682 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1683 gc->nodes_pre = gc->nodes;
1688 if (need_resched()) {
1694 for (i = r; i < r + ARRAY_SIZE(r); i++)
1695 if (!IS_ERR_OR_NULL(i->b)) {
1696 mutex_lock(&i->b->write_lock);
1697 if (btree_node_dirty(i->b))
1698 bch_btree_node_write(i->b, writes);
1699 mutex_unlock(&i->b->write_lock);
1700 rw_unlock(true, i->b);
1706 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1707 struct closure *writes, struct gc_stat *gc)
1709 struct btree *n = NULL;
1711 bool should_rewrite;
1713 should_rewrite = btree_gc_mark_node(b, gc);
1714 if (should_rewrite) {
1715 n = btree_node_alloc_replacement(b, NULL);
1717 if (!IS_ERR_OR_NULL(n)) {
1718 bch_btree_node_write_sync(n);
1720 bch_btree_set_root(n);
1728 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1731 ret = btree_gc_recurse(b, op, writes, gc);
1736 bkey_copy_key(&b->c->gc_done, &b->key);
1741 static void btree_gc_start(struct cache_set *c)
1747 if (!c->gc_mark_valid)
1750 mutex_lock(&c->bucket_lock);
1752 c->gc_mark_valid = 0;
1753 c->gc_done = ZERO_KEY;
1755 for_each_cache(ca, c, i)
1756 for_each_bucket(b, ca) {
1757 b->last_gc = b->gen;
1758 if (!atomic_read(&b->pin)) {
1760 SET_GC_SECTORS_USED(b, 0);
1764 mutex_unlock(&c->bucket_lock);
1767 static void bch_btree_gc_finish(struct cache_set *c)
1773 mutex_lock(&c->bucket_lock);
1776 c->gc_mark_valid = 1;
1779 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1780 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1783 /* don't reclaim buckets to which writeback keys point */
1785 for (i = 0; i < c->devices_max_used; i++) {
1786 struct bcache_device *d = c->devices[i];
1787 struct cached_dev *dc;
1788 struct keybuf_key *w, *n;
1791 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1793 dc = container_of(d, struct cached_dev, disk);
1795 spin_lock(&dc->writeback_keys.lock);
1796 rbtree_postorder_for_each_entry_safe(w, n,
1797 &dc->writeback_keys.keys, node)
1798 for (j = 0; j < KEY_PTRS(&w->key); j++)
1799 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1801 spin_unlock(&dc->writeback_keys.lock);
1805 c->avail_nbuckets = 0;
1806 for_each_cache(ca, c, i) {
1809 ca->invalidate_needs_gc = 0;
1811 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1812 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1814 for (i = ca->prio_buckets;
1815 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1816 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1818 for_each_bucket(b, ca) {
1819 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1821 if (atomic_read(&b->pin))
1824 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1826 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1827 c->avail_nbuckets++;
1831 mutex_unlock(&c->bucket_lock);
1834 static void bch_btree_gc(struct cache_set *c)
1837 struct gc_stat stats;
1838 struct closure writes;
1840 uint64_t start_time = local_clock();
1842 trace_bcache_gc_start(c);
1844 memset(&stats, 0, sizeof(struct gc_stat));
1845 closure_init_stack(&writes);
1846 bch_btree_op_init(&op, SHRT_MAX);
1850 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1852 ret = btree_root(gc_root, c, &op, &writes, &stats);
1853 closure_sync(&writes);
1857 schedule_timeout_interruptible(msecs_to_jiffies
1860 pr_warn("gc failed!");
1861 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1863 bch_btree_gc_finish(c);
1864 wake_up_allocators(c);
1866 bch_time_stats_update(&c->btree_gc_time, start_time);
1868 stats.key_bytes *= sizeof(uint64_t);
1870 bch_update_bucket_in_use(c, &stats);
1871 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1873 trace_bcache_gc_end(c);
1878 static bool gc_should_run(struct cache_set *c)
1883 for_each_cache(ca, c, i)
1884 if (ca->invalidate_needs_gc)
1887 if (atomic_read(&c->sectors_to_gc) < 0)
1893 static int bch_gc_thread(void *arg)
1895 struct cache_set *c = arg;
1898 wait_event_interruptible(c->gc_wait,
1899 kthread_should_stop() ||
1900 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1903 if (kthread_should_stop() ||
1904 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1911 wait_for_kthread_stop();
1915 int bch_gc_thread_start(struct cache_set *c)
1918 * A previous btree check operation may have used so much
1919 * system memory for the bcache btree node cache that the
1920 * registering process gets selected by the OOM killer.
1921 * Ignore any SIGKILL the OOM killer may have sent, so that
1922 * kthread_run() does not fail because of pending signals.
1923 * The bcache registering process will exit after the registration
1926 if (signal_pending(current))
1927 flush_signals(current);
1929 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1930 return PTR_ERR_OR_ZERO(c->gc_thread);
1933 /* Initial partial gc */
1935 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1938 struct bkey *k, *p = NULL;
1939 struct btree_iter iter;
1941 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1942 bch_initial_mark_key(b->c, b->level, k);
1944 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1947 bch_btree_iter_init(&b->keys, &iter, NULL);
1950 k = bch_btree_iter_next_filter(&iter, &b->keys,
1953 btree_node_prefetch(b, k);
1955 * initialize c->gc_stats.nodes
1956 * for incremental GC
1958 b->c->gc_stats.nodes++;
1962 ret = btree(check_recurse, p, b, op);
1965 } while (p && !ret);
1971 int bch_btree_check(struct cache_set *c)
1975 bch_btree_op_init(&op, SHRT_MAX);
1977 return btree_root(check_recurse, c, &op);
1980 void bch_initial_gc_finish(struct cache_set *c)
1986 bch_btree_gc_finish(c);
1988 mutex_lock(&c->bucket_lock);
1991 * We need to put some unused buckets directly on the prio freelist in
1992 * order to get the allocator thread started - it needs freed buckets in
1993 * order to rewrite the prios and gens, and it needs to rewrite prios
1994 * and gens in order to free buckets.
1996 * This is only safe for buckets that have no live data in them, which
1997 * there should always be some of.
1999 for_each_cache(ca, c, i) {
2000 for_each_bucket(b, ca) {
2001 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2002 fifo_full(&ca->free[RESERVE_BTREE]))
2005 if (bch_can_invalidate_bucket(ca, b) &&
2007 __bch_invalidate_one_bucket(ca, b);
2008 if (!fifo_push(&ca->free[RESERVE_PRIO],
2010 fifo_push(&ca->free[RESERVE_BTREE],
2016 mutex_unlock(&c->bucket_lock);
2019 /* Btree insertion */
2021 static bool btree_insert_key(struct btree *b, struct bkey *k,
2022 struct bkey *replace_key)
2024 unsigned int status;
2026 BUG_ON(bkey_cmp(k, &b->key) > 0);
2028 status = bch_btree_insert_key(&b->keys, k, replace_key);
2029 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2030 bch_check_keys(&b->keys, "%u for %s", status,
2031 replace_key ? "replace" : "insert");
2033 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2040 static size_t insert_u64s_remaining(struct btree *b)
2042 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2045 * Might land in the middle of an existing extent and have to split it
2047 if (b->keys.ops->is_extents)
2048 ret -= KEY_MAX_U64S;
2050 return max(ret, 0L);
2053 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2054 struct keylist *insert_keys,
2055 struct bkey *replace_key)
2058 int oldsize = bch_count_data(&b->keys);
2060 while (!bch_keylist_empty(insert_keys)) {
2061 struct bkey *k = insert_keys->keys;
2063 if (bkey_u64s(k) > insert_u64s_remaining(b))
2066 if (bkey_cmp(k, &b->key) <= 0) {
2070 ret |= btree_insert_key(b, k, replace_key);
2071 bch_keylist_pop_front(insert_keys);
2072 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2073 BKEY_PADDED(key) temp;
2074 bkey_copy(&temp.key, insert_keys->keys);
2076 bch_cut_back(&b->key, &temp.key);
2077 bch_cut_front(&b->key, insert_keys->keys);
2079 ret |= btree_insert_key(b, &temp.key, replace_key);
2087 op->insert_collision = true;
2089 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2091 BUG_ON(bch_count_data(&b->keys) < oldsize);
2095 static int btree_split(struct btree *b, struct btree_op *op,
2096 struct keylist *insert_keys,
2097 struct bkey *replace_key)
2100 struct btree *n1, *n2 = NULL, *n3 = NULL;
2101 uint64_t start_time = local_clock();
2103 struct keylist parent_keys;
2105 closure_init_stack(&cl);
2106 bch_keylist_init(&parent_keys);
2108 if (btree_check_reserve(b, op)) {
2112 WARN(1, "insufficient reserve for split\n");
2115 n1 = btree_node_alloc_replacement(b, op);
2119 split = set_blocks(btree_bset_first(n1),
2120 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2123 unsigned int keys = 0;
2125 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2127 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2132 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2137 mutex_lock(&n1->write_lock);
2138 mutex_lock(&n2->write_lock);
2140 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2143 * Has to be a linear search because we don't have an auxiliary
2147 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2148 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2151 bkey_copy_key(&n1->key,
2152 bset_bkey_idx(btree_bset_first(n1), keys));
2153 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2155 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2156 btree_bset_first(n1)->keys = keys;
2158 memcpy(btree_bset_first(n2)->start,
2159 bset_bkey_last(btree_bset_first(n1)),
2160 btree_bset_first(n2)->keys * sizeof(uint64_t));
2162 bkey_copy_key(&n2->key, &b->key);
2164 bch_keylist_add(&parent_keys, &n2->key);
2165 bch_btree_node_write(n2, &cl);
2166 mutex_unlock(&n2->write_lock);
2167 rw_unlock(true, n2);
2169 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2171 mutex_lock(&n1->write_lock);
2172 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2175 bch_keylist_add(&parent_keys, &n1->key);
2176 bch_btree_node_write(n1, &cl);
2177 mutex_unlock(&n1->write_lock);
2180 /* Depth increases, make a new root */
2181 mutex_lock(&n3->write_lock);
2182 bkey_copy_key(&n3->key, &MAX_KEY);
2183 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2184 bch_btree_node_write(n3, &cl);
2185 mutex_unlock(&n3->write_lock);
2188 bch_btree_set_root(n3);
2189 rw_unlock(true, n3);
2190 } else if (!b->parent) {
2191 /* Root filled up but didn't need to be split */
2193 bch_btree_set_root(n1);
2195 /* Split a non root node */
2197 make_btree_freeing_key(b, parent_keys.top);
2198 bch_keylist_push(&parent_keys);
2200 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2201 BUG_ON(!bch_keylist_empty(&parent_keys));
2205 rw_unlock(true, n1);
2207 bch_time_stats_update(&b->c->btree_split_time, start_time);
2211 bkey_put(b->c, &n2->key);
2212 btree_node_free(n2);
2213 rw_unlock(true, n2);
2215 bkey_put(b->c, &n1->key);
2216 btree_node_free(n1);
2217 rw_unlock(true, n1);
2219 WARN(1, "bcache: btree split failed (level %u)", b->level);
2221 if (n3 == ERR_PTR(-EAGAIN) ||
2222 n2 == ERR_PTR(-EAGAIN) ||
2223 n1 == ERR_PTR(-EAGAIN))
2229 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2230 struct keylist *insert_keys,
2231 atomic_t *journal_ref,
2232 struct bkey *replace_key)
2236 BUG_ON(b->level && replace_key);
2238 closure_init_stack(&cl);
2240 mutex_lock(&b->write_lock);
2242 if (write_block(b) != btree_bset_last(b) &&
2243 b->keys.last_set_unwritten)
2244 bch_btree_init_next(b); /* just wrote a set */
2246 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2247 mutex_unlock(&b->write_lock);
2251 BUG_ON(write_block(b) != btree_bset_last(b));
2253 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2255 bch_btree_leaf_dirty(b, journal_ref);
2257 bch_btree_node_write(b, &cl);
2260 mutex_unlock(&b->write_lock);
2262 /* wait for btree node write if necessary, after unlock */
2267 if (current->bio_list) {
2268 op->lock = b->c->root->level + 1;
2270 } else if (op->lock <= b->c->root->level) {
2271 op->lock = b->c->root->level + 1;
2274 /* Invalidated all iterators */
2275 int ret = btree_split(b, op, insert_keys, replace_key);
2277 if (bch_keylist_empty(insert_keys))
2285 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2286 struct bkey *check_key)
2289 uint64_t btree_ptr = b->key.ptr[0];
2290 unsigned long seq = b->seq;
2291 struct keylist insert;
2292 bool upgrade = op->lock == -1;
2294 bch_keylist_init(&insert);
2297 rw_unlock(false, b);
2298 rw_lock(true, b, b->level);
2300 if (b->key.ptr[0] != btree_ptr ||
2301 b->seq != seq + 1) {
2302 op->lock = b->level;
2307 SET_KEY_PTRS(check_key, 1);
2308 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2310 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2312 bch_keylist_add(&insert, check_key);
2314 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2316 BUG_ON(!ret && !bch_keylist_empty(&insert));
2319 downgrade_write(&b->lock);
2323 struct btree_insert_op {
2325 struct keylist *keys;
2326 atomic_t *journal_ref;
2327 struct bkey *replace_key;
2330 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2332 struct btree_insert_op *op = container_of(b_op,
2333 struct btree_insert_op, op);
2335 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2336 op->journal_ref, op->replace_key);
2337 if (ret && !bch_keylist_empty(op->keys))
2343 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2344 atomic_t *journal_ref, struct bkey *replace_key)
2346 struct btree_insert_op op;
2349 BUG_ON(current->bio_list);
2350 BUG_ON(bch_keylist_empty(keys));
2352 bch_btree_op_init(&op.op, 0);
2354 op.journal_ref = journal_ref;
2355 op.replace_key = replace_key;
2357 while (!ret && !bch_keylist_empty(keys)) {
2359 ret = bch_btree_map_leaf_nodes(&op.op, c,
2360 &START_KEY(keys->keys),
2367 pr_err("error %i", ret);
2369 while ((k = bch_keylist_pop(keys)))
2371 } else if (op.op.insert_collision)
2377 void bch_btree_set_root(struct btree *b)
2382 closure_init_stack(&cl);
2384 trace_bcache_btree_set_root(b);
2386 BUG_ON(!b->written);
2388 for (i = 0; i < KEY_PTRS(&b->key); i++)
2389 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2391 mutex_lock(&b->c->bucket_lock);
2392 list_del_init(&b->list);
2393 mutex_unlock(&b->c->bucket_lock);
2397 bch_journal_meta(b->c, &cl);
2401 /* Map across nodes or keys */
2403 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2405 btree_map_nodes_fn *fn, int flags)
2407 int ret = MAP_CONTINUE;
2411 struct btree_iter iter;
2413 bch_btree_iter_init(&b->keys, &iter, from);
2415 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2417 ret = btree(map_nodes_recurse, k, b,
2418 op, from, fn, flags);
2421 if (ret != MAP_CONTINUE)
2426 if (!b->level || flags == MAP_ALL_NODES)
2432 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2433 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2435 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2438 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2439 struct bkey *from, btree_map_keys_fn *fn,
2442 int ret = MAP_CONTINUE;
2444 struct btree_iter iter;
2446 bch_btree_iter_init(&b->keys, &iter, from);
2448 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2451 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2454 if (ret != MAP_CONTINUE)
2458 if (!b->level && (flags & MAP_END_KEY))
2459 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2460 KEY_OFFSET(&b->key), 0));
2465 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2466 struct bkey *from, btree_map_keys_fn *fn, int flags)
2468 return btree_root(map_keys_recurse, c, op, from, fn, flags);
2473 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2475 /* Overlapping keys compare equal */
2476 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2478 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
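/* Strict ordering by end key, for keys already known not to overlap */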
2483 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2484 struct keybuf_key *r)
2486 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2491 unsigned int nr_found;
2494 keybuf_pred_fn *pred;
2497 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2500 struct refill *refill = container_of(op, struct refill, op);
2501 struct keybuf *buf = refill->buf;
2502 int ret = MAP_CONTINUE;
2504 if (bkey_cmp(k, refill->end) > 0) {
2509 if (!KEY_SIZE(k)) /* end key */
2512 if (refill->pred(buf, k)) {
2513 struct keybuf_key *w;
2515 spin_lock(&buf->lock);
2517 w = array_alloc(&buf->freelist);
2519 spin_unlock(&buf->lock);
2524 bkey_copy(&w->key, k);
2526 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2527 array_free(&buf->freelist, w);
2531 if (array_freelist_empty(&buf->freelist))
2534 spin_unlock(&buf->lock);
2537 buf->last_scanned = *k;
2541 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2542 struct bkey *end, keybuf_pred_fn *pred)
2544 struct bkey start = buf->last_scanned;
2545 struct refill refill;
2549 bch_btree_op_init(&refill.op, -1);
2550 refill.nr_found = 0;
2555 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2556 refill_keybuf_fn, MAP_END_KEY);
2558 trace_bcache_keyscan(refill.nr_found,
2559 KEY_INODE(&start), KEY_OFFSET(&start),
2560 KEY_INODE(&buf->last_scanned),
2561 KEY_OFFSET(&buf->last_scanned));
2563 spin_lock(&buf->lock);
2565 if (!RB_EMPTY_ROOT(&buf->keys)) {
2566 struct keybuf_key *w;
2568 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2569 buf->start = START_KEY(&w->key);
2571 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2574 buf->start = MAX_KEY;
2578 spin_unlock(&buf->lock);
2581 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2583 rb_erase(&w->node, &buf->keys);
2584 array_free(&buf->freelist, w);
2587 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2589 spin_lock(&buf->lock);
2590 __bch_keybuf_del(buf, w);
2591 spin_unlock(&buf->lock);
2594 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2598 struct keybuf_key *p, *w, s;
2602 if (bkey_cmp(end, &buf->start) <= 0 ||
2603 bkey_cmp(start, &buf->end) >= 0)
2606 spin_lock(&buf->lock);
2607 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2609 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2611 w = RB_NEXT(w, node);
2616 __bch_keybuf_del(buf, p);
2619 spin_unlock(&buf->lock);
2623 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2625 struct keybuf_key *w;
2627 spin_lock(&buf->lock);
2629 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2631 while (w && w->private)
2632 w = RB_NEXT(w, node);
2635 w->private = ERR_PTR(-EINTR);
2637 spin_unlock(&buf->lock);
2641 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2644 keybuf_pred_fn *pred)
2646 struct keybuf_key *ret;
2649 ret = bch_keybuf_next(buf);
2653 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2654 pr_debug("scan finished");
2658 bch_refill_keybuf(c, buf, end, pred);
2664 void bch_keybuf_init(struct keybuf *buf)
2666 buf->last_scanned = MAX_KEY;
2667 buf->keys = RB_ROOT;
2669 spin_lock_init(&buf->lock);
2670 array_allocator_init(&buf->freelist);