1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/jiffies.h>
4 #include <linux/kernel.h>
5 #include <linux/ktime.h>
6 #include <linux/list.h>
7 #include <linux/sizes.h>
8 #include <linux/workqueue.h>
10 #include "block-group.h"
12 #include "free-space-cache.h"
14 /* This is an initial delay to give some chance for block reuse */
15 #define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC)
17 static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
18 struct btrfs_block_group *block_group)
20 return &discard_ctl->discard_list[block_group->discard_index];
/*
 * add_to_discard_list - queue a block_group for async discard
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Moves @block_group to the tail of its discard list (LRU order).  A
 * block_group that is not yet on any list gets its eligible time pushed
 * out by BTRFS_DISCARD_DELAY so recently freed space has a chance to be
 * reused before it is discarded.
 */
static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	spin_lock(&discard_ctl->lock);

	/* Async discard isn't running (read-only fs or flag cleared). */
	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	/*
	 * Only set the eligible time on first insertion; re-queueing an
	 * already listed block_group must not extend its deadline.
	 */
	if (list_empty(&block_group->discard_list))
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);

	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));

	spin_unlock(&discard_ctl->lock);
}
/*
 * remove_from_discard_list - remove a block_group from the discard lists
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Unlinks @block_group from whichever discard list it is on and clears
 * its eligible time.  Returns true when @block_group was the one the
 * work function currently owns, so the caller knows the delayed work may
 * need to be cancelled and rescheduled.
 */
static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;

	spin_lock(&discard_ctl->lock);

	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	list_del_init(&block_group->discard_list);

	spin_unlock(&discard_ctl->lock);

	return running;
}
/**
 * find_next_block_group - find block_group that's up next for discarding
 * @discard_ctl: discard control
 * @now: current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding checking the discard_eligible_time of block_group.
 */
static struct btrfs_block_group *find_next_block_group(
					struct btrfs_discard_ctl *discard_ctl,
					u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		/* Lists are LRU ordered, so the head is the oldest entry. */
		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

			if (!ret_block_group)
				ret_block_group = block_group;

			/* Current candidate is already eligible - keep it. */
			if (ret_block_group->discard_eligible_time < now)
				continue;

			/* Otherwise prefer whichever becomes eligible sooner. */
			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}
/**
 * peek_discard_list - wrap find_next_block_group()
 * @discard_ctl: discard control
 *
 * This wraps find_next_block_group() and sets the block_group to be in use.
 * Returns NULL when the best candidate is still inside its delay window.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group;
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);

	block_group = find_next_block_group(discard_ctl, now);

	/* Not eligible yet - leave it queued and hand back nothing. */
	if (block_group && now < block_group->discard_eligible_time)
		block_group = NULL;

	/* Record (or clear) the block_group owned by the work function. */
	discard_ctl->block_group = block_group;

	spin_unlock(&discard_ctl->lock);

	return block_group;
}
/**
 * btrfs_discard_cancel_work - remove a block_group from the discard lists
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This removes @block_group from the discard lists. If necessary, it waits on
 * the current work and then reschedules the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	/*
	 * remove_from_discard_list() returns true when @block_group was
	 * the one currently being worked on; only then must the pending
	 * work be flushed and the timer re-armed for the next candidate.
	 */
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}
/**
 * btrfs_discard_queue_work - handles queuing the block_groups
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This maintains the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	/* Nothing to do unless async discard is mounted in. */
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	add_to_discard_list(discard_ctl, block_group);

	/* Kick the delayed work only if nothing is already scheduled. */
	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}
/**
 * btrfs_discard_schedule_work - responsible for scheduling the discard work
 * @discard_ctl: discard control
 * @override: override the current timer
 *
 * Discards are issued by a delayed workqueue item. @override is used to
 * update the current delay as the baseline delay interval is reevaluated
 * on transaction commit. This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	struct btrfs_block_group *block_group;
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);

	if (!btrfs_run_discard_work(discard_ctl))
		goto out;

	/* Respect an already-armed timer unless the caller forces a reset. */
	if (!override && delayed_work_pending(&discard_ctl->work))
		goto out;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		u64 delay = 0;

		/* Wait out the remainder of the eligibility delay. */
		if (now < block_group->discard_eligible_time)
			delay = nsecs_to_jiffies(
				block_group->discard_eligible_time - now);

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, delay);
	}
out:
	spin_unlock(&discard_ctl->lock);
}
/**
 * btrfs_discard_workfn - discard work function
 * @work: the work
 *
 * This finds the next block_group to start discarding and then discards it.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	u64 trimmed = 0;

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	/* May return NULL if nothing is eligible or discard was stopped. */
	block_group = peek_discard_list(discard_ctl);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;

	/* Trim the whole block_group; the trimmed byte count is unused. */
	btrfs_trim_block_group(block_group, &trimmed, block_group->start,
			       btrfs_block_group_end(block_group), 0);

	remove_from_discard_list(discard_ctl, block_group);

	/* Re-arm the timer for the next eligible block_group, if any. */
	btrfs_discard_schedule_work(discard_ctl, false);
}
/**
 * btrfs_run_discard_work - determines if async discard should be running
 * @discard_ctl: discard control
 *
 * Checks if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	/* discard_ctl is embedded in fs_info; recover the container. */
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}
242 void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
244 if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
245 btrfs_discard_cleanup(fs_info);
249 set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
/*
 * btrfs_discard_stop - stop new async discard work from being scheduled
 * @fs_info: fs_info of interest
 *
 * Only clears the running flag; already-queued delayed work is not
 * cancelled here (btrfs_discard_cleanup() does that).
 */
void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}
257 void btrfs_discard_init(struct btrfs_fs_info *fs_info)
259 struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
262 spin_lock_init(&discard_ctl->lock);
263 INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);
265 for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
266 INIT_LIST_HEAD(&discard_ctl->discard_list[i]);
/*
 * btrfs_discard_cleanup - tear down async discard
 * @fs_info: fs_info of interest
 *
 * Stop new work from being scheduled first, then synchronously cancel
 * whatever delayed work is still pending - this order prevents the work
 * function from rescheduling itself after the cancel.
 */
void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
}