// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/staging/erofs/utils.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */

#include "internal.h"
#include <linux/pagevec.h>

struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
{
	struct page *page;

	if (!list_empty(pool)) {
		page = lru_to_page(pool);
		DBG_BUGON(page_ref_count(page) != 1);
		list_del(&page->lru);
	} else {
		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
	}
	return page;
}
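
/*
 * Illustrative caller sketch (not part of this file): callers can keep
 * spare pages on a local list and let erofs_allocpage() drain that pool
 * before falling back to the page allocator:
 *
 *	LIST_HEAD(pool);
 *	struct page *page = erofs_allocpage(&pool, GFP_NOFS, false);
 *
 *	if (!page)
 *		return -ENOMEM;
 */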

#if (EROFS_PCPUBUF_NR_PAGES > 0)
/* global per-CPU buffer pool */
static struct {
	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];

void *erofs_get_pcpubuf(unsigned int pagenr)
{
	preempt_disable();
	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
}
#endif
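
/*
 * Note (assumption based on the erofs headers): the preempt_disable() in
 * erofs_get_pcpubuf() is expected to be paired with erofs_put_pcpubuf()
 * from internal.h, which re-enables preemption once the caller is done
 * with the per-CPU buffer.
 */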

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

#define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)
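
/*
 * Design note: workgroup refcounts intentionally use a bare atomic_t
 * instead of refcount_t, since the shrinker "freezes" a workgroup by
 * cmpxchg'ing its refcount to a negative sentinel (see the freeze
 * helpers, assumed to live in internal.h), which refcount_t forbids.
 */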

static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);
	if (unlikely(o <= 0))
		return -1;

	if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
		goto repeat;

	/* decrease refcount paired by erofs_workgroup_put */
	if (unlikely(o == 1))
		atomic_long_dec(&erofs_global_shrink_cnt);
	return 0;
}
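
/*
 * In other words, erofs_workgroup_get() waits out any freezer, then takes
 * a reference with cmpxchg so it cannot race with a concurrent freeze.
 * Raising the count from 1 means the workgroup is in use again, so it is
 * removed from the global shrinkable count.
 */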

struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index, bool *tag)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = radix_tree_lookup(&sbi->workstn_tree, index);
	if (grp) {
		*tag = xa_pointer_tag(grp);
		grp = xa_untag_pointer(grp);

		if (erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}
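
/*
 * On success, erofs_find_workgroup() returns with an extra reference held
 * on the workgroup; the caller is expected to drop it later through
 * erofs_workgroup_put(). The lookup itself is lockless under RCU and
 * simply retries if the workgroup is being frozen by the shrinker.
 */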

int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp,
			     bool tag)
{
	struct erofs_sb_info *sbi;
	int err;

	/* grp shouldn't be broken or used before */
	if (unlikely(atomic_read(&grp->refcount) != 1)) {
		DBG_BUGON(1);
		return -EINVAL;
	}

	err = radix_tree_preload(GFP_NOFS);
	if (err)
		return err;

	sbi = EROFS_SB(sb);
	xa_lock(&sbi->workstn_tree);

	grp = xa_tag_pointer(grp, tag);

	/*
	 * Bump up the reference count before making this workgroup
	 * visible to other users in order to avoid potential UAF
	 * without being serialized by workstn_lock.
	 */
	__erofs_workgroup_get(grp);

	err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
	if (unlikely(err))
		/*
		 * It's safe to decrease it since the workgroup isn't
		 * visible and its refcount is >= 2 (cannot be frozen).
		 */
		__erofs_workgroup_put(grp);

	xa_unlock(&sbi->workstn_tree);
	radix_tree_preload_end();
	return err;
}
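
/*
 * Refcount lifecycle after a successful registration: the caller's
 * original reference (1) plus the one taken for the radix tree (2).
 * Once the caller eventually drops its reference, a count of exactly 1
 * marks the workgroup as reclaimable by the shrinker below.
 */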

static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}

int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count)
		__erofs_workgroup_free(grp);
	return count;
}
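
/*
 * erofs_workgroup_put() returns the remaining reference count: dropping
 * to 1 leaves only the radix-tree reference, so the workgroup becomes
 * shrinkable and the global counter goes up; dropping to 0 frees it via
 * the RCU callback.
 */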

static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
{
	erofs_workgroup_unfreeze(grp, 0);
	__erofs_workgroup_free(grp);
}

static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp,
					   bool cleanup)
{
	/*
	 * If the managed cache is on, the refcount of workgroups
	 * themselves could be < 0 (frozen). In other words,
	 * there is no guarantee that all refcounts are > 0.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/*
	 * Note that all cached pages should be unattached
	 * before being deleted from the radix tree. Otherwise some
	 * cached pages could still be attached to the orphaned
	 * old workgroup when the new one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
		erofs_workgroup_unfreeze(grp, 1);
		return false;
	}

	/*
	 * It's impossible to fail after the workgroup is frozen,
	 * but in order to avoid some race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
						     grp->index)) != grp);

	/*
	 * If the managed cache is on, the last refcount should indicate
	 * the related workstation.
	 */
	erofs_workgroup_unfreeze_final(grp);
	return true;
}

static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink,
					      bool cleanup)
{
	pgoff_t first_index = 0;
	void *batch[PAGEVEC_SIZE];
	unsigned int freed = 0;
	unsigned int i, found;

repeat:
	xa_lock(&sbi->workstn_tree);

	found = radix_tree_gang_lookup(&sbi->workstn_tree,
				       batch, first_index, PAGEVEC_SIZE);

	for (i = 0; i < found; ++i) {
		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);

		/* continue the lookup just past the current workgroup */
		first_index = grp->index + 1;

		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
			continue;

		++freed;
		if (unlikely(!--nr_shrink))
			break;
	}
	xa_unlock(&sbi->workstn_tree);

	if (i && nr_shrink)
		goto repeat;

	return freed;
}
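
/*
 * The gang lookup above scans the workstation tree in batches of
 * PAGEVEC_SIZE entries and restarts from just past the last workgroup
 * seen, so the xa_lock is never held across more than one batch.
 */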

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	erofs_shrink_workstation(sbi, ~0UL, true);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}
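
/*
 * Note that unregistration drains every remaining workgroup first
 * (nr_shrink == ~0UL with cleanup set) while holding umount_mutex, so a
 * concurrent erofs_shrink_scan() can never touch a superblock that is
 * going away.
 */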

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr, false);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}
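
/*
 * Locking dance above: the sb list lock is dropped while an individual
 * superblock is shrunk (umount_mutex pins it instead), and the per-run
 * sequence number plus list_move_tail() keep the walk both terminating
 * and fair across repeated shrinker invocations.
 */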

static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info);
}
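
/*
 * erofs_init_shrinker()/erofs_exit_shrinker() are presumably called from
 * the module init/exit paths (outside this file) so that a single global
 * shrinker serves all mounted EROFS instances.
 */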

void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}
#endif	/* !CONFIG_EROFS_FS_ZIP */