// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"

/* This is an initial delay to give some chance for block reuse */
#define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)

static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
	return &discard_ctl->discard_list[block_group->discard_index];
}

static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	spin_lock(&discard_ctl->lock);

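	/* Bail if discard shouldn't be running (read-only fs or discard stopped). */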
	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

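	/* Only set the eligible time on first insertion; a requeue keeps it. */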
	if (list_empty(&block_group->discard_list))
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);

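	/* Queue at the tail of the list picked by the block group's discard index. */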
	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));

	spin_unlock(&discard_ctl->lock);
}

static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;

	spin_lock(&discard_ctl->lock);

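	/*
	 * discard_ctl->block_group points at the block group the work
	 * function is currently operating on.  Clear it and report back so
	 * the caller knows it raced with the running work.
	 */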
	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	list_del_init(&block_group->discard_list);

	spin_unlock(&discard_ctl->lock);

	return running;
}

/**
 * find_next_block_group - find block_group that's up next for discarding
 * @discard_ctl: discard control
 * @now: current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding, checking the discard_eligible_time of each block_group.
 */
static struct btrfs_block_group *find_next_block_group(
					struct btrfs_discard_ctl *discard_ctl,
					u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

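			/*
			 * Track the block group with the earliest eligible
			 * time; once the current pick is already eligible,
			 * stop searching.
			 */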
			if (!ret_block_group)
				ret_block_group = block_group;

			if (ret_block_group->discard_eligible_time < now)
				break;

			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}

/**
 * peek_discard_list - wrap find_next_block_group()
 * @discard_ctl: discard control
 *
 * This wraps find_next_block_group() and sets the block_group to be in use.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group;
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);

	block_group = find_next_block_group(discard_ctl, now);

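	/* Nothing is ready if the earliest block group isn't eligible yet. */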
	if (block_group && now < block_group->discard_eligible_time)
		block_group = NULL;

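	/*
	 * Publish the in-use block group (possibly NULL) under the lock so
	 * remove_from_discard_list() can detect a concurrent removal.
	 */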
	discard_ctl->block_group = block_group;

	spin_unlock(&discard_ctl->lock);

	return block_group;
}

/**
 * btrfs_discard_cancel_work - remove a block_group from the discard lists
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This removes @block_group from the discard lists.  If necessary, it waits on
 * the current work and then reschedules the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}

/**
 * btrfs_discard_queue_work - handles queuing the block_groups
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This maintains the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	add_to_discard_list(discard_ctl, block_group);

	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}

/**
 * btrfs_discard_schedule_work - responsible for scheduling the discard work
 * @discard_ctl: discard control
 * @override: override the current timer
 *
 * Discards are issued by a delayed workqueue item.  @override is used to
 * update the current delay as the baseline delay interval is reevaluated
 * on transaction commit.  This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	struct btrfs_block_group *block_group;
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);

	if (!btrfs_run_discard_work(discard_ctl))
		goto out;

	if (!override && delayed_work_pending(&discard_ctl->work))
		goto out;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		u64 delay = 0;

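		/* Wait out whatever remains of the eligible time. */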
		if (now < block_group->discard_eligible_time)
			delay = nsecs_to_jiffies(
				block_group->discard_eligible_time - now);

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, delay);
	}
out:
	spin_unlock(&discard_ctl->lock);
}

/**
 * btrfs_discard_workfn - discard work function
 * @work: work
 *
 * This finds the next block_group to start discarding and then discards it.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	u64 trimmed = 0;

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;

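	/* Trim the whole block group; a minlen of 0 applies no minimum length filter. */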
	btrfs_trim_block_group(block_group, &trimmed, block_group->start,
			       btrfs_block_group_end(block_group), 0);

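	/* All done with this block group; drop it and kick off the next one. */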
	remove_from_discard_list(discard_ctl, block_group);
	btrfs_discard_schedule_work(discard_ctl, false);
}

/**
 * btrfs_run_discard_work - determines if async discard should be running
 * @discard_ctl: discard control
 *
 * Checks if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}

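/*
 * Allow async discard to start running if the mount enables it; otherwise
 * make sure nothing was left queued.
 */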
void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_cleanup(fs_info);
		return;
	}

	set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

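/* Initialize the discard control structure: lock, work item, and discard lists. */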
void btrfs_discard_init(struct btrfs_fs_info *fs_info)
{
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	int i;

	spin_lock_init(&discard_ctl->lock);
	INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);
}

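/* Stop async discard and wait for any in-flight work to finish. */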
void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
}