/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

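/*
 * Editorial sketch, not part of the driver: how the percentages above
 * translate into a default cache size.  The figures are made-up examples,
 * not measurements; the real computation is in dm_bufio_init() below.
 *
 *	8 GiB of low memory * DM_BUFIO_MEMORY_PERCENT / 100   ~= 164 MiB
 *	128 MiB vmalloc arena * DM_BUFIO_VMALLOC_PERCENT / 100 = 32 MiB
 *
 * The default cache size is the lower of the two, 32 MiB in this example.
 */
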
/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to the lru in the process
 *	context.
 */

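/*
 * Editorial sketch, not part of the driver: a typical buffer life cycle
 * through the lists above and the state bits defined below.
 *
 *	read:	linked to lru[LIST_CLEAN], B_READING set until read_endio()
 *	dirty:	dm_bufio_mark_buffer_dirty() sets B_DIRTY and moves the
 *		buffer to lru[LIST_DIRTY]
 *	write:	__write_dirty_buffer() clears B_DIRTY and sets B_WRITING
 *	done:	write_endio() clears B_WRITING; the buffer stays on
 *		lru[LIST_DIRTY] until process context relinks it to
 *		lru[LIST_CLEAN] (see __write_dirty_buffers_async())
 */
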
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		/* descend right when the cached block is smaller */
		n = (b->block < block) ? n->rb_right : n->rb_left;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_right) : &((*new)->rb_left);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

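/*
 * Editorial sketch, not part of the driver: the tree is keyed by b->block,
 * with smaller block numbers in the left subtree.  Looking up block 13 in
 * a tree whose root caches block 10 therefore descends to the right child
 * (10 < 13), exactly as the ternary in __find() does.  __insert() applies
 * the same comparison, so an in-order walk visits buffers in ascending
 * block order.
 */
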
/*----------------------------------------------------------------*/

static void adjust_total_allocated(unsigned char data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}

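/*
 * Editorial sketch, not part of the driver: which branch of
 * alloc_buffer_data() a request takes.  The sizes are example values.
 *
 *	512 B blocks (slab cache created in dm_bufio_client_create()):
 *		-> DATA_MODE_SLAB via kmem_cache_alloc()
 *	64 KiB blocks, GFP_NOWAIT | __GFP_NORETRY from the cache path:
 *		-> DATA_MODE_GET_FREE_PAGES (may fail under fragmentation)
 *	64 KiB blocks, GFP_KERNEL for the initial reserve:
 *		-> DATA_MODE_VMALLOC (must not fail; vmalloc space is spent
 *		   only on the small reserve pool)
 */
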
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O.  The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_put(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
	if (!bio) {
dmio:
		use_dmio(b, rw, sector, n_sectors, offset);
		return;
	}

	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, b->c->bdev);
	bio_set_op_attrs(bio, rw, 0);
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	if (likely(b->c->sectors_per_block_bits >= 0))
		sector = b->block << b->c->sectors_per_block_bits;
	else
		sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
	sector += b->c->start;

	if (rw != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, rw, sector, n_sectors, offset);
	else
		use_dmio(b, rw, sector, n_sectors, offset);
}

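/*
 * Editorial sketch, not part of the driver: the write-alignment arithmetic
 * above with example numbers.  For a 16 KiB block dirtied in bytes
 * [5000, 6000):
 *
 *	offset = 5000 & -4096          = 4096
 *	end    = (6000 + 4095) & -4096 = 8192
 *
 * so a single aligned 4 KiB region (sectors 8-15 of the block) is written
 * instead of a misaligned 1000-byte span.
 */
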
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
		if (mutex_trylock(&dm_bufio_clients_lock)) {
			__cache_size_refresh();
			mutex_unlock(&dm_bufio_clients_lock);
		}
	}

	buffers = dm_bufio_cache_size_per_client;
	if (likely(c->sectors_per_block_bits >= 0))
		buffers >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		buffers /= c->block_size;

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = mult_frac(buffers,
				       DM_BUFIO_WRITEBACK_PERCENT, 100);
}

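/*
 * Editorial sketch, not part of the driver: the limit arithmetic above
 * with example numbers.  For a 64 MiB per-client share and 4 KiB blocks
 * (sectors_per_block_bits == 3):
 *
 *	limit_buffers     = 64 MiB >> (3 + SECTOR_SHIFT) = 16384
 *	threshold_buffers = 16384 * DM_BUFIO_WRITEBACK_PERCENT / 100 = 12288
 *
 * Background writeback starts at 12288 dirty buffers; 16384 is the hard cap.
 */
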
/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

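/*
 * Editorial sketch, not part of the driver: how a caller typically uses
 * the three lookup calls above.  The function name is hypothetical.
 *
 *	static int example_read_block(struct dm_bufio_client *c, sector_t blk)
 *	{
 *		struct dm_buffer *b;
 *		void *data = dm_bufio_read(c, blk, &b);
 *
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *
 *		... use block_size bytes at "data" ...
 *
 *		dm_bufio_release(b);
 *		return 0;
 *	}
 *
 * dm_bufio_get() is the non-blocking variant: it returns NULL when the
 * block is not already cached.  dm_bufio_new() skips the read entirely for
 * blocks the caller is about to overwrite completely.
 */
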
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

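/*
 * Editorial sketch, not part of the driver: a read-modify-write that lets
 * the partial-dirty tracking shrink the eventual write.  The offsets are
 * hypothetical.
 *
 *	void *data = dm_bufio_read(c, blk, &b);
 *
 *	if (!IS_ERR(data)) {
 *		memcpy((char *)data + 5000, payload, 1000);
 *		dm_bufio_mark_partial_buffer_dirty(b, 5000, 6000);
 *		dm_bufio_release(b);
 *	}
 *
 * Only the dirty byte range, rounded to DM_BUFIO_WRITE_ALIGN in
 * submit_io(), is written back rather than the whole block.
 */
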
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us.  In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

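/*
 * Editorial sketch, not part of the driver: the usual commit pattern built
 * from the two routines above.
 *
 *	dm_bufio_write_dirty_buffers_async(c);	start the writes early
 *	...
 *	r = dm_bufio_write_dirty_buffers(c);	wait for them + flush cache
 *
 * The return value is the first asynchronous write error (collected in
 * c->async_write_error) if there was one, otherwise the result of the
 * final dm_bufio_issue_flush().
 */
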
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);

	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

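/*
 * Editorial sketch, not part of the driver: a hypothetical caller moving a
 * metadata block during compaction (error handling elided).
 *
 *	void *data = dm_bufio_read(c, old_block, &b);
 *
 *	if (!IS_ERR(data))
 *		dm_bufio_release_move(b, new_block);
 *
 * Note that dm_bufio_release_move() consumes the caller's hold count: it
 * ends with dm_bufio_release(b), so the buffer must not be touched
 * afterwards.
 */
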
/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;

	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	/* the aux area is allocated immediately after struct dm_buffer */
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			stack_trace_print(b->stack_entries, b->stack_len, 1);
			/* mark unclaimed to avoid BUG_ON below */
			b->hold_count = 0;
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO is pending or the client
 * is still using it.  Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);

	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;

	return retain_bytes;
}

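/*
 * Editorial sketch, not part of the driver: with the default retain_bytes
 * of 256 KiB and 4 KiB blocks (sectors_per_block_bits == 3), the shrinker
 * keeps
 *
 *	256 KiB >> (3 + SECTOR_SHIFT) = 64
 *
 * buffers cached even under memory pressure.
 */
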
static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
			    gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return freed;
			cond_resched();
		}
	}
	return freed;
}

static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);

	return (count < retain_target) ? 0 : (count - retain_target);
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker);
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

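/*
 * Editorial sketch, not part of the driver: the client life cycle around
 * dm_bufio_client_create().  The values are hypothetical; shown is a
 * 4 KiB block size with one reserved buffer and no aux data or callbacks.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	dm_bufio_set_minimum_buffers(c, 16);
 *	... dm_bufio_read() / dm_bufio_release() pairs ...
 *	dm_bufio_client_destroy(c);
 *
 * All buffers must be released before dm_bufio_client_destroy() is called.
 */
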
/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

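/*
 * Editorial sketch, not part of the driver: the clamp above prevents the
 * jiffies conversion from overflowing.  With HZ == 1000, a max_age_seconds
 * module parameter of 5000000 would overflow a 32-bit unsigned
 * (5000000 * 1000 > UINT_MAX), so it is first clamped to
 * UINT_MAX / HZ = 4294967 seconds.
 */
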
static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");