/*
 * Copyright (C) 2017 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */
7 #include "dm-cache-background-tracker.h"
9 /*----------------------------------------------------------------*/
11 #define DM_MSG_PREFIX "dm-background-tracker"
14 struct list_head list;
16 struct policy_work work;
19 struct background_tracker {
21 atomic_t pending_promotes;
22 atomic_t pending_writebacks;
23 atomic_t pending_demotes;
25 struct list_head issued;
26 struct list_head queued;
27 struct rb_root pending;
29 struct kmem_cache *work_cache;
32 struct background_tracker *btracker_create(unsigned max_work)
34 struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
36 b->max_work = max_work;
37 atomic_set(&b->pending_promotes, 0);
38 atomic_set(&b->pending_writebacks, 0);
39 atomic_set(&b->pending_demotes, 0);
41 INIT_LIST_HEAD(&b->issued);
42 INIT_LIST_HEAD(&b->queued);
45 b->work_cache = KMEM_CACHE(bt_work, 0);
47 DMERR("couldn't create mempool for background work items");
54 EXPORT_SYMBOL_GPL(btracker_create);
56 void btracker_destroy(struct background_tracker *b)
58 kmem_cache_destroy(b->work_cache);
61 EXPORT_SYMBOL_GPL(btracker_destroy);
63 static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
65 if (from_oblock(lhs) < from_oblock(rhs))
68 if (from_oblock(rhs) < from_oblock(lhs))
74 static bool __insert_pending(struct background_tracker *b,
79 struct rb_node **new = &b->pending.rb_node, *parent = NULL;
82 w = container_of(*new, struct bt_work, node);
85 cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
87 new = &((*new)->rb_left);
90 new = &((*new)->rb_right);
97 rb_link_node(&nw->node, parent, new);
98 rb_insert_color(&nw->node, &b->pending);
103 static struct bt_work *__find_pending(struct background_tracker *b,
108 struct rb_node **new = &b->pending.rb_node;
111 w = container_of(*new, struct bt_work, node);
113 cmp = cmp_oblock(w->work.oblock, oblock);
115 new = &((*new)->rb_left);
118 new = &((*new)->rb_right);
124 return *new ? w : NULL;
128 static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
132 atomic_add(delta, &b->pending_promotes);
136 atomic_add(delta, &b->pending_demotes);
139 case POLICY_WRITEBACK:
140 atomic_add(delta, &b->pending_writebacks);
145 unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
147 return atomic_read(&b->pending_writebacks);
149 EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
151 unsigned btracker_nr_demotions_queued(struct background_tracker *b)
153 return atomic_read(&b->pending_demotes);
155 EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
157 static bool max_work_reached(struct background_tracker *b)
163 int btracker_queue(struct background_tracker *b,
164 struct policy_work *work,
165 struct policy_work **pwork)
172 if (max_work_reached(b))
175 w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
179 memcpy(&w->work, work, sizeof(*work));
181 if (!__insert_pending(b, w)) {
183 * There was a race, we'll just ignore this second
184 * bit of work for the same oblock.
186 kmem_cache_free(b->work_cache, w);
192 list_add(&w->list, &b->issued);
194 list_add(&w->list, &b->queued);
195 update_stats(b, &w->work, 1);
199 EXPORT_SYMBOL_GPL(btracker_queue);
202 * Returns -ENODATA if there's no work.
204 int btracker_issue(struct background_tracker *b, struct policy_work **work)
208 if (list_empty(&b->queued))
211 w = list_first_entry(&b->queued, struct bt_work, list);
212 list_move(&w->list, &b->issued);
217 EXPORT_SYMBOL_GPL(btracker_issue);
219 void btracker_complete(struct background_tracker *b,
220 struct policy_work *op)
222 struct bt_work *w = container_of(op, struct bt_work, work);
224 update_stats(b, &w->work, -1);
225 rb_erase(&w->node, &b->pending);
227 kmem_cache_free(b->work_cache, w);
229 EXPORT_SYMBOL_GPL(btracker_complete);
231 bool btracker_promotion_already_present(struct background_tracker *b,
234 return __find_pending(b, oblock) != NULL;
236 EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
238 /*----------------------------------------------------------------*/