// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */
#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);
static void pblk_rb_data_free(struct pblk_rb *rb)
{
	struct pblk_rb_pages *p, *t;

	down_write(&pblk_rb_lock);
	list_for_each_entry_safe(p, t, &rb->pages, list) {
		free_pages((unsigned long)page_address(p->pages), p->order);
		list_del(&p->list);
		kfree(p);
	}
	up_write(&pblk_rb_lock);
}

void pblk_rb_free(struct pblk_rb *rb)
{
	pblk_rb_data_free(rb);
	vfree(rb->entries);
}
/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
static unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
	/* Alloc a write buffer that can at least fit 128 entries */
	return (1 << max(get_count_order(nr_entries), 7));
}
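/*
 * Worked example (hypothetical numbers, a sketch of the sizing rule):
 * get_count_order() rounds up to the next power of two, and the max()
 * with 7 enforces the 128-entry floor. A request for 100 entries yields
 * max(7, 7) = 7, i.e. a 128-entry buffer, while a request for 1000
 * entries yields max(10, 7) = 10, i.e. a 1024-entry buffer.
 */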
/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/core-api/circular-buffers.rst)
 */
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int seg_size)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entries;
	unsigned int init_entry = 0;
	unsigned int max_order = MAX_ORDER - 1;
	unsigned int power_size, power_seg_sz;
	unsigned int alloc_order, order, iter;
	unsigned int nr_entries;

	nr_entries = pblk_rb_calculate_size(size);
	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
	if (!entries)
		return -ENOMEM;
	power_size = get_count_order(size);
	power_seg_sz = get_count_order(seg_size);

	down_write(&pblk_rb_lock);
	rb->entries = entries;
	rb->seg_size = (1 << power_seg_sz);
	rb->nr_entries = (1 << power_size);
	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
	rb->flush_point = EMPTY_ENTRY;

	spin_lock_init(&rb->w_lock);
	spin_lock_init(&rb->s_lock);

	INIT_LIST_HEAD(&rb->pages);
	alloc_order = power_size;
	if (alloc_order >= max_order) {
		order = max_order;
		iter = (1 << (alloc_order - max_order));
	} else {
		order = alloc_order;
		iter = 1;
	}

	do {
		struct pblk_rb_entry *entry;
		struct pblk_rb_pages *page_set;
		void *kaddr;
		unsigned long set_size;
		int i;

		page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
		if (!page_set) {
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}

		page_set->order = order;
		page_set->pages = alloc_pages(GFP_KERNEL, order);
		if (!page_set->pages) {
			kfree(page_set);
			pblk_rb_data_free(rb);
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}
		kaddr = page_address(page_set->pages);

		entry = &rb->entries[init_entry];
		entry->data = kaddr;
		entry->cacheline = pblk_cacheline_to_addr(init_entry++);
		entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

		set_size = (1 << order);
		for (i = 1; i < set_size; i++) {
			entry = &rb->entries[init_entry];
			entry->cacheline = pblk_cacheline_to_addr(init_entry++);
			entry->data = kaddr + (i * rb->seg_size);
			entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
			bio_list_init(&entry->w_ctx.bios);
		}

		list_add_tail(&page_set->list, &rb->pages);
		iter--;
	} while (iter > 0);
	up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_set(&rb->inflight_flush_point, 0);
#endif

	/*
	 * Initialize rate-limiter, which controls access to the write buffer
	 * by user and GC I/O
	 */
	pblk_rl_init(&pblk->rl, rb->nr_entries);

	return 0;
}
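/*
 * Worked example (hypothetical numbers, sketching the allocation split
 * above): with power_size = 12 and MAX_ORDER = 11, max_order is 10, so
 * a 4096-entry buffer cannot come from a single allocation. The loop
 * then runs iter = 1 << (12 - 10) = 4 times, grabbing four order-10
 * page sets and carving each into 1024 seg_size-sized cachelines.
 */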
static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
	int flags;

	flags = READ_ONCE(w_ctx->flags);
	WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
			"pblk: overwriting unsubmitted data\n");

	/* Release flags on context. Protect from writes and reads */
	smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
	pblk_ppa_set_empty(&w_ctx->ppa);
	w_ctx->lba = ADDR_EMPTY;
}
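/*
 * For illustration, the entry flag lifecycle assumed throughout this
 * file (a sketch, not an exhaustive state machine):
 *
 *   PBLK_WRITABLE_ENTRY  -> entry is free; a writer may claim it
 *   PBLK_WRITTEN_DATA    -> data has been copied in; the write thread
 *                           may read it out to the media
 *   PBLK_SUBMITTED_ENTRY -> handed to the device; clean_wctx() may
 *                           recycle it back to PBLK_WRITABLE_ENTRY
 *
 * clean_wctx() warns if it ever recycles an entry that was never marked
 * submitted, since that would mean overwriting unpersisted data.
 */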
#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
					(CIRC_SPACE(head, tail, size))
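/*
 * Worked example (hypothetical numbers): with size = 8, head = 2 and
 * tail = 6, CIRC_CNT(2, 6, 8) = (2 - 6) & 7 = 4 occupied entries, and
 * CIRC_SPACE(2, 6, 8) = (6 - 2 - 1) & 7 = 3 free entries. One slot is
 * always kept empty so a full buffer can be told apart from an empty
 * one.
 */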
/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}
unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
			      unsigned int nr_entries)
{
	return (p + nr_entries) & (rb->nr_entries - 1);
}
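/*
 * Example (hypothetical numbers): because nr_entries is a power of two,
 * the mask is equivalent to a modulo. With rb->nr_entries = 128,
 * p = 126 and nr_entries = 4, (126 + 4) & 127 = 2, i.e. the pointer
 * wraps past the end of the buffer back to slot 2.
 */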
/*
 * Buffer count is calculated with respect to the submission entry signaling the
 * entries that are available to send to the media
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int subm = READ_ONCE(rb->subm);

	return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int subm;

	subm = READ_ONCE(rb->subm);
	/* Commit read means updating submission pointer */
	smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));

	return subm;
}
static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_line *line;
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	unsigned int user_io = 0, gc_io = 0;
	unsigned int i;
	int flags;

	for (i = 0; i < to_update; i++) {
		entry = &rb->entries[rb->l2p_update];
		w_ctx = &entry->w_ctx;

		flags = READ_ONCE(entry->w_ctx.flags);
		if (flags & PBLK_IOTYPE_USER)
			user_io++;
		else if (flags & PBLK_IOTYPE_GC)
			gc_io++;
		else
			WARN(1, "pblk: unknown IO type\n");

		pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
							entry->cacheline);

		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);
		clean_wctx(w_ctx);
		rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
	}

	pblk_rl_out(&pblk->rl, user_io, gc_io);

	return 0;
}
/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
			      unsigned int mem, unsigned int sync)
{
	unsigned int space, count;
	int ret = 0;

	lockdep_assert_held(&rb->w_lock);

	/* Update l2p only as buffer entries are being overwritten */
	space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
	if (space > nr_entries)
		goto out;

	count = nr_entries - space;
	/* l2p_update used exclusively under rb->w_lock */
	ret = __pblk_rb_update_l2p(rb, count);

out:
	return ret;
}
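/*
 * Worked example (hypothetical numbers): if the space between the write
 * pointer (mem) and l2p_update covers 2 entries but an incoming write
 * needs nr_entries = 5, the 3 oldest cached entries are about to be
 * overwritten, so count = 5 - 2 = 3 l2p mappings are flipped from the
 * cacheline to their device address first.
 */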
/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
	unsigned int sync;
	unsigned int to_update;

	spin_lock(&rb->w_lock);

	/* Protect from reads and writes */
	sync = smp_load_acquire(&rb->sync);

	to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
	__pblk_rb_update_l2p(rb, to_update);

	spin_unlock(&rb->w_lock);
}
/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
				  struct pblk_w_ctx w_ctx,
				  struct pblk_rb_entry *entry)
{
	memcpy(entry->data, data, rb->seg_size);

	entry->w_ctx.lba = w_ctx.lba;
	entry->w_ctx.ppa = w_ctx.ppa;
}
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
		entry->w_ctx.lba = ADDR_EMPTY;

	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}
static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
				   unsigned int pos)
{
	struct pblk_rb_entry *entry;
	unsigned int sync, flush_point;

	pblk_rb_sync_init(rb, NULL);
	sync = READ_ONCE(rb->sync);

	if (pos == sync) {
		pblk_rb_sync_end(rb, NULL);
		return 0;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_inc(&rb->inflight_flush_point);
#endif

	flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
	entry = &rb->entries[flush_point];

	/* Protect flush points */
	smp_store_release(&rb->flush_point, flush_point);

	if (bio)
		bio_list_add(&entry->w_ctx.bios, bio);

	pblk_rb_sync_end(rb, NULL);

	return bio ? 1 : 0;
}
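/*
 * Example (hypothetical numbers): the flush point is the last entry
 * that must reach the media, i.e. the one just before @pos. With
 * nr_entries = 128 and pos = 0, the point wraps to entry 127; with
 * pos = 5 it is entry 4. A flush bio, if given, parks on that entry's
 * bio list and completes once the sync pointer passes it.
 */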
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			       unsigned int *pos)
{
	unsigned int mem;
	unsigned int sync;

	sync = READ_ONCE(rb->sync);
	mem = READ_ONCE(rb->mem);

	if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
		return 0;

	if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
		return 0;

	*pos = mem;

	return 1;
}
static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			     unsigned int *pos)
{
	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	/* Protect from read count */
	smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
	return 1;
}
void pblk_rb_flush(struct pblk_rb *rb)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	unsigned int mem = READ_ONCE(rb->mem);

	if (pblk_rb_flush_point_set(rb, NULL, mem))
		return;

	pblk_write_kick(pblk);
}
static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
				   unsigned int *pos, struct bio *bio,
				   int *io_ret)
{
	unsigned int mem;

	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
	*io_ret = NVM_IO_DONE;

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct pblk *pblk = container_of(rb, struct pblk, rwb);

		atomic64_inc(&pblk->nr_flush);
		if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
			*io_ret = NVM_IO_OK;
	}

	/* Protect from read count */
	smp_store_release(&rb->mem, mem);

	return 1;
}
/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	int io_ret;

	spin_lock(&rb->w_lock);
	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
	if (io_ret) {
		spin_unlock(&rb->w_lock);
		return io_ret;
	}

	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
		spin_unlock(&rb->w_lock);
		return NVM_IO_REQUEUE;
	}

	pblk_rl_user_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return io_ret;
}
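/*
 * For illustration, the return values a caller is assumed to handle
 * (a sketch of the contract, inferred from this file, not an
 * exhaustive list):
 *
 *   NVM_IO_DONE    - data buffered; the bio can be completed
 *   NVM_IO_OK      - data buffered, but a flush bio was parked on a
 *                    flush point and completes later
 *   NVM_IO_REQUEUE - no buffer space or budget; the caller retries
 */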
/*
 * Look at pblk_rb_may_write_user comment
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);

	spin_lock(&rb->w_lock);
	if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	if (!pblk_rb_may_write(rb, nr_entries, pos)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	pblk_rl_gc_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return 1;
}
/*
 * Read available entries on rb and add them to the given bio. To avoid a memory
 * copy, a page reference to the write buffer is added to the bio instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 unsigned int pos, unsigned int nr_entries,
				 unsigned int count)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct request_queue *q = pblk->dev->q;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	struct pblk_rb_entry *entry;
	struct page *page;
	unsigned int pad = 0, to_read = nr_entries;
	unsigned int i;
	int flags;

	if (count < nr_entries) {
		pad = nr_entries - count;
		to_read = count;
	}

	c_ctx->sentry = pos;
	c_ctx->nr_valid = to_read;
	c_ctx->nr_padded = pad;
	for (i = 0; i < to_read; i++) {
		entry = &rb->entries[pos];

		/* A write has been allowed into the buffer, but data is still
		 * being copied to it. It is ok to busy wait.
		 */
try:
		flags = READ_ONCE(entry->w_ctx.flags);
		if (!(flags & PBLK_WRITTEN_DATA)) {
			io_schedule();
			goto try;
		}

		page = virt_to_page(entry->data);
		if (!page) {
			pblk_err(pblk, "could not allocate write bio page\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
								rb->seg_size) {
			pblk_err(pblk, "could not add page to write bio\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		flags &= ~PBLK_WRITTEN_DATA;
		flags |= PBLK_SUBMITTED_ENTRY;

		/* Release flags on context. Protect from writes */
		smp_store_release(&entry->w_ctx.flags, flags);

		pos = pblk_rb_ptr_wrap(rb, pos, 1);
	}
	if (pad) {
		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
			pblk_err(pblk, "could not pad page in write bio\n");
			return NVM_IO_ERR;
		}

		if (pad < pblk->min_write_pgs)
			atomic64_inc(&pblk->pad_dist[pad - 1]);
		else
			pblk_warn(pblk, "padding more than min. sectors\n");

		atomic64_add(pad, &pblk->pad_wa);
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(pad, &pblk->padded_writes);
#endif

	return NVM_IO_OK;
}
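/*
 * Worked example (hypothetical numbers): if the request must span
 * nr_entries = 8 sectors but only count = 5 are available in the
 * buffer, then to_read = 5 and pad = 3, so three padding pages are
 * appended so the write still fills a complete planned write unit.
 */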
/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr l2p_ppa;
	u64 pos = pblk_addr_to_cacheline(ppa);
	void *data;
	int flags;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must ensure that the access will not cause an overflow */
	BUG_ON(pos >= rb->nr_entries);
#endif
	entry = &rb->entries[pos];
	w_ctx = &entry->w_ctx;
	flags = READ_ONCE(w_ctx->flags);

	spin_lock(&rb->w_lock);
	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Check if the entry has been overwritten or is scheduled to be */
	if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
						flags & PBLK_WRITABLE_ENTRY) {
		ret = 0;
		goto out;
	}

	/* Only advance the bio if it hasn't been advanced already. If advanced,
	 * this bio is at least a partial bio (i.e., it has partially been
	 * filled with data from the cache). If part of the data resides on the
	 * media, we will read later on
	 */
	if (unlikely(!advanced_bio))
		bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

	data = bio_data(bio);
	memcpy(data, entry->data, rb->seg_size);

out:
	spin_unlock(&rb->w_lock);
	return ret;
}
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
	unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);

	return &rb->entries[entry].w_ctx;
}
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
	__acquires(&rb->s_lock)
{
	if (flags)
		spin_lock_irqsave(&rb->s_lock, *flags);
	else
		spin_lock_irq(&rb->s_lock);

	return rb->sync;
}
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
	__releases(&rb->s_lock)
{
	lockdep_assert_held(&rb->s_lock);

	if (flags)
		spin_unlock_irqrestore(&rb->s_lock, *flags);
	else
		spin_unlock_irq(&rb->s_lock);
}
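/*
 * A usage sketch (hypothetical caller): the pair brackets a sync-pointer
 * critical section; @flags is NULL when the caller does not need the
 * irqsave variant:
 *
 *	unsigned long irq_flags;
 *	unsigned int sync = pblk_rb_sync_init(rb, &irq_flags);
 *
 *	... inspect or advance the sync pointer ...
 *
 *	pblk_rb_sync_end(rb, &irq_flags);
 */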
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int sync, flush_point;

	lockdep_assert_held(&rb->s_lock);

	sync = READ_ONCE(rb->sync);
	flush_point = READ_ONCE(rb->flush_point);

	if (flush_point != EMPTY_ENTRY) {
		unsigned int secs_to_flush;

		secs_to_flush = pblk_rb_ring_count(flush_point, sync,
					rb->nr_entries);
		if (secs_to_flush < nr_entries) {
			/* Protect flush points */
			smp_store_release(&rb->flush_point, EMPTY_ENTRY);
		}
	}

	sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);

	/* Protect from counts */
	smp_store_release(&rb->sync, sync);

	return sync;
}
/* Calculate how many sectors to submit up to the current flush point. */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
	unsigned int subm, sync, flush_point;
	unsigned int submitted, to_flush;

	/* Protect flush points */
	flush_point = smp_load_acquire(&rb->flush_point);
	if (flush_point == EMPTY_ENTRY)
		return 0;

	/* Protect syncs */
	sync = smp_load_acquire(&rb->sync);

	subm = READ_ONCE(rb->subm);
	submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

	/* The sync point itself counts as a sector to sync */
	to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

	return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
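/*
 * Worked example (hypothetical numbers): with nr_entries = 128,
 * sync = 10, subm = 14 and flush_point = 19, submitted = 4 sectors are
 * already in flight and to_flush = (19 - 10) + 1 = 10 must reach the
 * media, so 10 - 4 = 6 more sectors still need to be submitted.
 */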
int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
	struct pblk_rb_entry *entry;
	int i;
	int ret = 0;

	spin_lock(&rb->w_lock);
	spin_lock_irq(&rb->s_lock);

	if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
				(rb->sync == rb->l2p_update) &&
				(rb->flush_point == EMPTY_ENTRY))
		goto out;

	if (!rb->entries) {
		ret = 1;
		goto out;
	}

	for (i = 0; i < rb->nr_entries; i++) {
		entry = &rb->entries[i];
		if (!entry->data) {
			ret = 1;
			goto out;
		}
	}

out:
	spin_unlock(&rb->w_lock);
	spin_unlock_irq(&rb->s_lock);

	return ret;
}
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
	return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
	return (pos >= rb->nr_entries);
}
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_c_ctx *c;
	ssize_t offset;
	int queued_entries = 0;

	spin_lock_irq(&rb->s_lock);
	list_for_each_entry(c, &pblk->compl_list, list)
		queued_entries++;
	spin_unlock_irq(&rb->s_lock);

	if (rb->flush_point != EMPTY_ENTRY)
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			rb->flush_point,
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);
	else
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);

	return offset;
}