// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/* First get a stable cleared mask, setting the old mask to 0. */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/* Now clear the masked bits in our free word */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

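/*
 * Find and set the first zero bit in @word, starting the search at @hint.
 * If @wrap is true, a failed search restarts once from bit 0. Returns the
 * bit number on success or -1 if the word is full.
 */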
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * flush any hints.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

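/*
 * Try to allocate a bit from a single word, flushing that word's deferred
 * clears and retrying if the first attempt finds it full.
 */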
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

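/*
 * Count either the set bits (@set == true) or the deferred-cleared bits
 * (@set == false) across all words of the bitmap.
 */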
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

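/*
 * Helper for sbitmap_bitmap_show(): print one byte of the bitmap in a
 * hex-dump style layout (16 bytes per line, grouped in pairs).
 */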
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}

	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

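/*
 * Allocate a bit using this CPU's cached allocation hint, then update the
 * hint for the next allocation from this CPU. A minimal (illustrative, not
 * taken from any particular caller) usage pattern:
 *
 *	int nr = __sbitmap_queue_get(sbq);
 *
 *	if (nr < 0)
 *		return -EBUSY;	// map is full, caller decides how to wait
 *	...
 *	sbitmap_queue_clear(sbq, nr, smp_processor_id());
 */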
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

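/*
 * Pick the next wait queue to wake up, starting from wake_index and
 * skipping queues with no waiters. Returns NULL if nobody is waiting.
 */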
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

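/*
 * Charge one freed bit against the current wait queue's batch counter and,
 * once a full batch has been freed, wake that many waiters and advance to
 * the next wait queue. Returns true if the caller should retry, i.e. it
 * lost the wait_cnt reset race and a wakeup is still owed on another queue.
 */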
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

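/*
 * Attach @sbq_wait to a wait queue, bumping ws_active the first time this
 * wait entry is associated with the queue. Callers are expected to pair
 * this with sbitmap_del_wait_queue(); a minimal (illustrative) pattern:
 *
 *	struct sbq_wait wait;
 *
 *	sbitmap_add_wait_queue(sbq, ws, &wait);
 *	... wait for a bit to become free ...
 *	sbitmap_del_wait_queue(&wait);
 */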
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);