// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/staging/erofs/utils.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/pagevec.h>

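/*
 * Hand out a page for temporary use: reuse one from the caller's local
 * @pool if possible, otherwise fall back to the page allocator.  With
 * @nofail, __GFP_NOFAIL makes the allocation retry until it succeeds.
 */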
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
{
        struct page *page;

        if (!list_empty(pool)) {
                page = lru_to_page(pool);
                DBG_BUGON(page_ref_count(page) != 1);
                list_del(&page->lru);
        } else {
                page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
        }
        return page;
}

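/*
 * Static per-CPU buffers used as short-lived scratch space (in EROFS,
 * e.g. for decompression).  erofs_get_pcpubuf() disables preemption so
 * the returned buffer stays on the current CPU until the caller
 * re-enables preemption.
 */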
#if (EROFS_PCPUBUF_NR_PAGES > 0)
static struct {
        u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];

void *erofs_get_pcpubuf(unsigned int pagenr)
{
        preempt_disable();
        return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
}
#endif

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

#define __erofs_workgroup_get(grp)      atomic_inc(&(grp)->refcount)
#define __erofs_workgroup_put(grp)      atomic_dec(&(grp)->refcount)

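/*
 * Workgroup refcount rules: a count of exactly 1 means the workgroup is
 * reachable only through the workstation radix tree and is therefore
 * reclaimable; erofs_global_shrink_cnt tracks how many such workgroups
 * exist across all instances.  A frozen workgroup temporarily holds a
 * negative refcount, so callers must not assume all refcounts are > 0.
 */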
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
        int o;

repeat:
        o = erofs_wait_on_workgroup_freezed(grp);
        if (unlikely(o <= 0))
                return -1;

        if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
                goto repeat;

        /*
         * The workgroup is no longer reclaimable: this pairs with the
         * shrink count increment in erofs_workgroup_put.
         */
        if (unlikely(o == 1))
                atomic_long_dec(&erofs_global_shrink_cnt);
        return 0;
}

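/*
 * Look up a workgroup by index under rcu_read_lock().  If grabbing a
 * reference fails, the workgroup is being frozen or torn down, so drop
 * out of the RCU read section and retry the whole lookup.
 */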
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
                                             pgoff_t index, bool *tag)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_workgroup *grp;

repeat:
        rcu_read_lock();
        grp = radix_tree_lookup(&sbi->workstn_tree, index);
        if (grp) {
                *tag = xa_pointer_tag(grp);
                grp = xa_untag_pointer(grp);

                if (erofs_workgroup_get(grp)) {
                        /* prefer to leave the RCU read section before retrying */
                        rcu_read_unlock();
                        goto repeat;
                }

                DBG_BUGON(index != grp->index);
        }
        rcu_read_unlock();
        return grp;
}

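/*
 * Insert a freshly created workgroup into the workstation radix tree.
 * The tree slot stores a tagged pointer, so the caller's @tag travels
 * with the entry and is recovered by xa_pointer_tag() on lookup.
 */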
int erofs_register_workgroup(struct super_block *sb,
                             struct erofs_workgroup *grp,
                             bool tag)
{
        struct erofs_sb_info *sbi;
        int err;

        /* grp shouldn't be broken or used before */
        if (unlikely(atomic_read(&grp->refcount) != 1)) {
                DBG_BUGON(1);
                return -EINVAL;
        }

        err = radix_tree_preload(GFP_NOFS);
        if (err)
                return err;

        sbi = EROFS_SB(sb);
        xa_lock(&sbi->workstn_tree);

        grp = xa_tag_pointer(grp, tag);

        /*
         * Bump up the reference count before making this workgroup
         * visible to other users in order to avoid a potential
         * use-after-free with lookups not serialized by workstn_lock.
         */
        __erofs_workgroup_get(grp);

        err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
        if (unlikely(err))
                /*
                 * It's safe to decrease here since the workgroup isn't
                 * visible yet and its refcount is >= 2 (cannot be frozen).
                 */
                __erofs_workgroup_put(grp);

        xa_unlock(&sbi->workstn_tree);
        radix_tree_preload_end();
        return err;
}

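/*
 * The actual free is deferred via RCU (erofs_workgroup_free_rcu) so
 * that concurrent lockless lookups in erofs_find_workgroup() never see
 * a workgroup disappear under them.  The shrink count drops because one
 * reclaimable workgroup is going away.
 */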
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
        atomic_long_dec(&erofs_global_shrink_cnt);
        erofs_workgroup_free_rcu(grp);
}

int erofs_workgroup_put(struct erofs_workgroup *grp)
{
        int count = atomic_dec_return(&grp->refcount);

        if (count == 1)
                atomic_long_inc(&erofs_global_shrink_cnt);
        else if (!count)
                __erofs_workgroup_free(grp);
        return count;
}

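/*
 * Called on a frozen workgroup whose last reference (the workstation's
 * own) is being dropped: unfreeze straight to a refcount of 0, then
 * free it.
 */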
static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
{
        erofs_workgroup_unfreeze(grp, 0);
        __erofs_workgroup_free(grp);
}

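/*
 * Try to reclaim a single workgroup: freeze it so no new references can
 * be taken, strip any cached pages still attached to it, delete it from
 * the workstation radix tree, and finally free it.  If the cached pages
 * cannot be released, the workgroup is unfrozen and left in place.
 */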
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
                                           struct erofs_workgroup *grp,
                                           bool cleanup)
{
        /*
         * If managed cache is on, the refcount of a workgroup
         * itself could be < 0 (frozen).  In other words, there
         * is no guarantee that all refcounts are > 0.
         */
        if (!erofs_workgroup_try_to_freeze(grp, 1))
                return false;

        /*
         * Note that all cached pages should be unattached before the
         * workgroup is deleted from the radix tree.  Otherwise some
         * cached pages could still be attached to the orphaned old
         * workgroup while the new one is available in the tree.
         */
        if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
                erofs_workgroup_unfreeze(grp, 1);
                return false;
        }

        /*
         * Deletion cannot fail after the workgroup is frozen; still,
         * add a DBG_BUGON to catch unexpected race conditions early.
         */
        DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
                                                     grp->index)) != grp);

        /*
         * If managed cache is on, the last reference count is held
         * by the workstation itself.
         */
        erofs_workgroup_unfreeze_final(grp);
        return true;
}

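/*
 * Walk the workstation radix tree in batches of PAGEVEC_SIZE entries
 * and try to release up to @nr_shrink workgroups.  The gang lookup
 * restarts from the index after the last workgroup seen, so dropping
 * and retaking the tree lock between batches is safe.
 */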
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                              unsigned long nr_shrink,
                                              bool cleanup)
{
        pgoff_t first_index = 0;
        void *batch[PAGEVEC_SIZE];
        unsigned int freed = 0;

        int i, found;
repeat:
        xa_lock(&sbi->workstn_tree);

        found = radix_tree_gang_lookup(&sbi->workstn_tree,
                                       batch, first_index, PAGEVEC_SIZE);

        for (i = 0; i < found; ++i) {
                struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);

                first_index = grp->index + 1;

                /* try to shrink each valid workgroup */
                if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
                        continue;

                ++freed;
                if (unlikely(!--nr_shrink))
                        break;
        }
        xa_unlock(&sbi->workstn_tree);

        if (i && nr_shrink)
                goto repeat;
        return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

void erofs_shrinker_register(struct super_block *sb)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);

        mutex_init(&sbi->umount_mutex);

        spin_lock(&erofs_sb_list_lock);
        list_add(&sbi->list, &erofs_sb_list);
        spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);

        mutex_lock(&sbi->umount_mutex);
        erofs_shrink_workstation(sbi, ~0UL, true);

        spin_lock(&erofs_sb_list_lock);
        list_del(&sbi->list);
        spin_unlock(&erofs_sb_list_lock);
        mutex_unlock(&sbi->umount_mutex);
}

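/* report how many reclaimable workgroups exist across all instances */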
static unsigned long erofs_shrink_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
{
        return atomic_long_read(&erofs_global_shrink_cnt);
}

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct erofs_sb_info *sbi;
        struct list_head *p;

        unsigned long nr = sc->nr_to_scan;
        unsigned int run_no;
        unsigned long freed = 0;

        spin_lock(&erofs_sb_list_lock);
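        /*
         * Pick a nonzero run number: freshly mounted superblocks start
         * with shrinker_run_no == 0, so 0 must never be used as a run
         * id even after the counter wraps around.
         */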
        do {
                run_no = ++shrinker_run_no;
        } while (run_no == 0);

        /* Iterate over all mounted superblocks and try to shrink them */
        p = erofs_sb_list.next;
        while (p != &erofs_sb_list) {
                sbi = list_entry(p, struct erofs_sb_info, list);

                /*
                 * Superblocks already scanned in this run are moved to
                 * the tail, so stop at the first one whose run number
                 * matches.
                 */
                if (sbi->shrinker_run_no == run_no)
                        break;

                if (!mutex_trylock(&sbi->umount_mutex)) {
                        p = p->next;
                        continue;
                }

                spin_unlock(&erofs_sb_list_lock);
                sbi->shrinker_run_no = run_no;

                freed += erofs_shrink_workstation(sbi, nr, false);

                spin_lock(&erofs_sb_list_lock);
                /* get the next list element before moving this one */
                p = p->next;

                /*
                 * Move this superblock to the end of the list to
                 * provide some fairness.
                 */
                list_move_tail(&sbi->list, &erofs_sb_list);
                mutex_unlock(&sbi->umount_mutex);

                if (freed >= nr)
                        break;
        }
        spin_unlock(&erofs_sb_list_lock);
        return freed;
}

static struct shrinker erofs_shrinker_info = {
        .scan_objects = erofs_shrink_scan,
        .count_objects = erofs_shrink_count,
        .seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
        return register_shrinker(&erofs_shrinker_info);
}

void erofs_exit_shrinker(void)
{
        unregister_shrinker(&erofs_shrinker_info);
}
#endif  /* !CONFIG_EROFS_FS_ZIP */