1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
15
16 #include <trace/events/erofs.h>
17
18 /*
19  * a compressed_pages[] placeholder used to prevent the slot from
20  * being filled with file pages for in-place decompression.
21  */
22 #define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
23
24 /* how to allocate cached pages for a workgroup */
25 enum z_erofs_cache_alloctype {
26         DONTALLOC,      /* don't allocate any cached pages */
27         DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
28 };
29
30 /*
31  * tagged pointer with 1-bit tag for all compressed pages
32  * tag 0 - the page is just found with an extra page reference
33  */
34 typedef tagptr1_t compressed_page_t;
35
36 #define tag_compressed_page_justfound(page) \
37         tagptr_fold(compressed_page_t, page, 1)
38
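/*
 * A rough usage sketch of the 1-bit tagged pointer (all helpers used here
 * appear elsewhere in this file): a page just grabbed from the managed
 * cache is folded with tag 1 so that the submission path can later tell
 * such "justfound" pages apart from pages the workgroup already owned:
 *
 *	compressed_page_t t = tag_compressed_page_justfound(page);
 *	struct page *p = tagptr_unfold_ptr(t);		(raw page pointer)
 *	int justfound  = tagptr_unfold_tags(t);		(1 if freshly found)
 */
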
39 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
40 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
41
42 void z_erofs_exit_zip_subsystem(void)
43 {
44         destroy_workqueue(z_erofs_workqueue);
45         kmem_cache_destroy(z_erofs_workgroup_cachep);
46 }
47
48 static inline int init_unzip_workqueue(void)
49 {
50         const unsigned int onlinecpus = num_possible_cpus();
51
52         /*
53          * we don't need too many threads; limiting the thread count
54          * could improve scheduling performance.
55          */
56         z_erofs_workqueue =
57                 alloc_workqueue("erofs_unzipd",
58                                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
59                                 onlinecpus + onlinecpus / 4);
60
61         return z_erofs_workqueue ? 0 : -ENOMEM;
62 }
63
64 static void init_once(void *ptr)
65 {
66         struct z_erofs_vle_workgroup *grp = ptr;
67         struct z_erofs_vle_work *const work =
68                 z_erofs_vle_grab_primary_work(grp);
69         unsigned int i;
70
71         mutex_init(&work->lock);
72         work->nr_pages = 0;
73         work->vcnt = 0;
74         for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
75                 grp->compressed_pages[i] = NULL;
76 }
77
78 static void init_always(struct z_erofs_vle_workgroup *grp)
79 {
80         struct z_erofs_vle_work *const work =
81                 z_erofs_vle_grab_primary_work(grp);
82
83         atomic_set(&grp->obj.refcount, 1);
84         grp->flags = 0;
85
86         DBG_BUGON(work->nr_pages);
87         DBG_BUGON(work->vcnt);
88 }
89
90 int __init z_erofs_init_zip_subsystem(void)
91 {
92         z_erofs_workgroup_cachep =
93                 kmem_cache_create("erofs_compress",
94                                   Z_EROFS_WORKGROUP_SIZE, 0,
95                                   SLAB_RECLAIM_ACCOUNT, init_once);
96
97         if (z_erofs_workgroup_cachep) {
98                 if (!init_unzip_workqueue())
99                         return 0;
100
101                 kmem_cache_destroy(z_erofs_workgroup_cachep);
102         }
103         return -ENOMEM;
104 }
105
106 enum z_erofs_vle_work_role {
107         Z_EROFS_VLE_WORK_SECONDARY,
108         Z_EROFS_VLE_WORK_PRIMARY,
109         /*
110          * The current work was the tail of an existing chain, and all of the
111          * previously processed chained works have been decided to hook up to it.
112          * A new chain should be created for the remaining unprocessed works;
113          * therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
114          * the next work cannot reuse the whole page in the following scenario:
115          *  ________________________________________________________________
116          * |      tail (partial) page     |       head (partial) page       |
117          * |  (belongs to the next work)  |  (belongs to the current work)  |
118          * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
119          */
120         Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
121         /*
122          * The current work has been linked with the processed chained works,
123          * and could also be linked with the potential remaining works, which
124          * means if the processing page is the tail partial page of the work,
125          * the current work can safely use the whole page (since the next work
126          * is under control) for in-place decompression, as illustrated below:
127          *  ________________________________________________________________
128          * |  tail (partial) page  |          head (partial) page           |
129          * | (of the current work) |         (of the previous work)         |
130          * |  PRIMARY_FOLLOWED or  |                                        |
131          * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
132          *
133          * [  (*) the above page can be used for the current work itself.  ]
134          */
135         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
136         Z_EROFS_VLE_WORK_MAX
137 };
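
/*
 * Note that the numeric ordering of the roles above matters: the code
 * below compares them directly (builder_is_hooked()/builder_is_followed()
 * and the `role >= Z_EROFS_VLE_WORK_PRIMARY' checks), relying on
 * SECONDARY < PRIMARY < PRIMARY_HOOKED < PRIMARY_FOLLOWED.
 */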
138
139 struct z_erofs_vle_work_builder {
140         enum z_erofs_vle_work_role role;
141         /*
142          * 'hosted = false' means that the current workgroup doesn't belong to
143          * the owned chained workgroups. In other words, it is none of our
144          * business to submit this workgroup.
145          */
146         bool hosted;
147
148         struct z_erofs_vle_workgroup *grp;
149         struct z_erofs_vle_work *work;
150         struct z_erofs_pagevec_ctor vector;
151
152         /* pages used for reading the compressed data */
153         struct page **compressed_pages;
154         unsigned int compressed_deficit;
155 };
156
157 #define VLE_WORK_BUILDER_INIT() \
158         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
159
160 #ifdef EROFS_FS_HAS_MANAGED_CACHE
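/*
 * Try to fill the remaining compressed page slots of the work from the
 * managed cache, or mark them PAGE_UNALLOCATED for delayed allocation at
 * bio submission time. If every remaining slot can be covered this way
 * ("standalone"), the compressed data no longer relies on in-place file
 * pages and the role is downgraded to plain Z_EROFS_VLE_WORK_PRIMARY.
 */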
161 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
162                                      struct address_space *mc,
163                                      pgoff_t index,
164                                      unsigned int clusterpages,
165                                      enum z_erofs_cache_alloctype type,
166                                      struct list_head *pagepool,
167                                      gfp_t gfp)
168 {
169         struct page **const pages = bl->compressed_pages;
170         const unsigned int remaining = bl->compressed_deficit;
171         bool standalone = true;
172         unsigned int i, j = 0;
173
174         if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
175                 return;
176
177         gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
178
179         index += clusterpages - remaining;
180
181         for (i = 0; i < remaining; ++i) {
182                 struct page *page;
183                 compressed_page_t t;
184
185                 /* the compressed page was loaded before */
186                 if (READ_ONCE(pages[i]))
187                         continue;
188
189                 page = find_get_page(mc, index + i);
190
191                 if (page) {
192                         t = tag_compressed_page_justfound(page);
193                 } else if (type == DELAYEDALLOC) {
194                         t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
195                 } else {        /* DONTALLOC */
196                         if (standalone)
197                                 j = i;
198                         standalone = false;
199                         continue;
200                 }
201
202                 if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
203                         continue;
204
205                 if (page)
206                         put_page(page);
207         }
208         bl->compressed_pages += j;
209         bl->compressed_deficit = remaining - j;
210
211         if (standalone)
212                 bl->role = Z_EROFS_VLE_WORK_PRIMARY;
213 }
214
215 /* called by erofs_shrinker to get rid of all compressed_pages */
216 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
217                                        struct erofs_workgroup *egrp)
218 {
219         struct z_erofs_vle_workgroup *const grp =
220                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
221         struct address_space *const mapping = MNGD_MAPPING(sbi);
222         const int clusterpages = erofs_clusterpages(sbi);
223         int i;
224
225         /*
226          * the refcount of the workgroup is now frozen at 1,
227          * therefore no need to worry about available decompression users.
228          */
229         for (i = 0; i < clusterpages; ++i) {
230                 struct page *page = grp->compressed_pages[i];
231
232                 if (!page || page->mapping != mapping)
233                         continue;
234
235                 /* block other users from reclaiming or migrating the page */
236                 if (!trylock_page(page))
237                         return -EBUSY;
238
239                 /* barrier is implied in the following 'unlock_page' */
240                 WRITE_ONCE(grp->compressed_pages[i], NULL);
241
242                 set_page_private(page, 0);
243                 ClearPagePrivate(page);
244
245                 unlock_page(page);
246                 put_page(page);
247         }
248         return 0;
249 }
250
251 int erofs_try_to_free_cached_page(struct address_space *mapping,
252                                   struct page *page)
253 {
254         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
255         const unsigned int clusterpages = erofs_clusterpages(sbi);
256         struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
257         int ret = 0;    /* 0 - busy */
258
259         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
260                 unsigned int i;
261
262                 for (i = 0; i < clusterpages; ++i) {
263                         if (grp->compressed_pages[i] == page) {
264                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
265                                 ret = 1;
266                                 break;
267                         }
268                 }
269                 erofs_workgroup_unfreeze(&grp->obj, 1);
270
271                 if (ret) {
272                         ClearPagePrivate(page);
273                         put_page(page);
274                 }
275         }
276         return ret;
277 }
278 #else
279 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
280                                      struct address_space *mc,
281                                      pgoff_t index,
282                                      unsigned int clusterpages,
283                                      enum z_erofs_cache_alloctype type,
284                                      struct list_head *pagepool,
285                                      gfp_t gfp)
286 {
287         /* nowhere to load compressed pages from */
288 }
289 #endif
290
291 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
292 static inline bool try_to_reuse_as_compressed_page(
293         struct z_erofs_vle_work_builder *b,
294         struct page *page)
295 {
296         while (b->compressed_deficit) {
297                 --b->compressed_deficit;
298                 if (!cmpxchg(b->compressed_pages++, NULL, page))
299                         return true;
300         }
301
302         return false;
303 }
304
305 /* callers must hold work->lock */
306 static int z_erofs_vle_work_add_page(
307         struct z_erofs_vle_work_builder *builder,
308         struct page *page,
309         enum z_erofs_page_type type)
310 {
311         int ret;
312         bool occupied;
313
314         /* give priority for the compressed data storage */
315         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
316             type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
317             try_to_reuse_as_compressed_page(builder, page))
318                 return 0;
319
320         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
321                                            page, type, &occupied);
322         builder->work->vcnt += (unsigned int)ret;
323
324         return ret ? 0 : -EAGAIN;
325 }
326
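/*
 * Workgroups queued for decompression are chained through their `next'
 * fields, and *owned_head refers to the head of the chain the caller is
 * currently building. Roughly, grp->next can be:
 *   Z_EROFS_VLE_WORKGRP_NIL         - not claimed by any chain yet;
 *   Z_EROFS_VLE_WORKGRP_TAIL        - the open tail of an existing chain;
 *   Z_EROFS_VLE_WORKGRP_TAIL_CLOSED - the tail of a chain closed for submission;
 *   otherwise                       - it points into the next workgroup of its chain.
 */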
327 static enum z_erofs_vle_work_role
328 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
329                        z_erofs_vle_owned_workgrp_t *owned_head,
330                        bool *hosted)
331 {
332         DBG_BUGON(*hosted);
333
334         /* let's claim the following types of workgroup */
335 retry:
336         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
337                 /* type 1, nil workgroup */
338                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
339                             *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
340                         goto retry;
341
342                 *owned_head = &grp->next;
343                 *hosted = true;
344                 /* lucky, I am the followee :) */
345                 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
346
347         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
348                 /*
349                  * type 2, link to the end of an existing open chain,
350                  * be careful that its submission itself is governed
351                  * by the original owned chain.
352                  */
353                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
354                             *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
355                         goto retry;
356                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
357                 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
358         }
359
360         return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
361 }
362
363 struct z_erofs_vle_work_finder {
364         struct super_block *sb;
365         pgoff_t idx;
366         unsigned int pageofs;
367
368         struct z_erofs_vle_workgroup **grp_ret;
369         enum z_erofs_vle_work_role *role;
370         z_erofs_vle_owned_workgrp_t *owned_head;
371         bool *hosted;
372 };
373
374 static struct z_erofs_vle_work *
375 z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
376 {
377         bool tag, primary;
378         struct erofs_workgroup *egrp;
379         struct z_erofs_vle_workgroup *grp;
380         struct z_erofs_vle_work *work;
381
382         egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
383         if (!egrp) {
384                 *f->grp_ret = NULL;
385                 return NULL;
386         }
387
388         grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
389         *f->grp_ret = grp;
390
391         work = z_erofs_vle_grab_work(grp, f->pageofs);
392         /* if multiref is disabled, `primary' is always true */
393         primary = true;
394
395         DBG_BUGON(work->pageofs != f->pageofs);
396
397         /*
398          * lock must be taken first to avoid grp->next == NIL between
399          * claiming workgroup and adding pages:
400          *                        grp->next != NIL
401          *   grp->next = NIL
402          *   mutex_unlock_all
403          *                        mutex_lock(&work->lock)
404          *                        add all pages to pagevec
405          *
406          * [correct locking case 1]:
407          *   mutex_lock(grp->work[a])
408          *   ...
409          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
410          *   ...                          *role = SECONDARY
411          *                                add all pages to pagevec
412          *                                ...
413          *                                mutex_unlock(grp->work[c])
414          *   mutex_lock(grp->work[c])
415          *   ...
416          *   grp->next = NIL
417          *   mutex_unlock_all
418          *
419          * [correct locking case 2]:
420          *   mutex_lock(grp->work[b])
421          *   ...
422          *   mutex_lock(grp->work[a])
423          *   ...
424          *   mutex_lock(grp->work[c])
425          *   ...
426          *   grp->next = NIL
427          *   mutex_unlock_all
428          *                                mutex_lock(grp->work[a])
429          *                                *role = PRIMARY_OWNER
430          *                                add all pages to pagevec
431          *                                ...
432          */
433         mutex_lock(&work->lock);
434
435         *f->hosted = false;
436         if (!primary)
437                 *f->role = Z_EROFS_VLE_WORK_SECONDARY;
438         else    /* claim the workgroup if possible */
439                 *f->role = try_to_claim_workgroup(grp, f->owned_head,
440                                                   f->hosted);
441         return work;
442 }
443
444 static struct z_erofs_vle_work *
445 z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
446                           struct erofs_map_blocks *map)
447 {
448         bool gnew = false;
449         struct z_erofs_vle_workgroup *grp = *f->grp_ret;
450         struct z_erofs_vle_work *work;
451
452         /* if multiref is disabled, grp should never be nullptr */
453         if (unlikely(grp)) {
454                 DBG_BUGON(1);
455                 return ERR_PTR(-EINVAL);
456         }
457
458         /* no available workgroup, let's allocate one */
459         grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
460         if (unlikely(!grp))
461                 return ERR_PTR(-ENOMEM);
462
463         init_always(grp);
464         grp->obj.index = f->idx;
465         grp->llen = map->m_llen;
466
467         z_erofs_vle_set_workgrp_fmt(grp, (map->m_flags & EROFS_MAP_ZIPPED) ?
468                                     Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
469                                     Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
470
471         /* new workgrps have been claimed as type 1 */
472         WRITE_ONCE(grp->next, *f->owned_head);
473         /* primary and followed work for all new workgrps */
474         *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
475         /* it should be submitted by ourselves */
476         *f->hosted = true;
477
478         gnew = true;
479         work = z_erofs_vle_grab_primary_work(grp);
480         work->pageofs = f->pageofs;
481
482         /*
483          * lock all primary followed works before they become visible to others;
484          * mutex_trylock *never* fails for a new workgroup.
485          */
486         mutex_trylock(&work->lock);
487
488         if (gnew) {
489                 int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
490
491                 if (err) {
492                         mutex_unlock(&work->lock);
493                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
494                         return ERR_PTR(-EAGAIN);
495                 }
496         }
497
498         *f->owned_head = &grp->next;
499         *f->grp_ret = grp;
500         return work;
501 }
502
503 #define builder_is_hooked(builder) \
504         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
505
506 #define builder_is_followed(builder) \
507         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
508
509 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
510                                        struct super_block *sb,
511                                        struct erofs_map_blocks *map,
512                                        z_erofs_vle_owned_workgrp_t *owned_head)
513 {
514         const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
515         struct z_erofs_vle_workgroup *grp;
516         const struct z_erofs_vle_work_finder finder = {
517                 .sb = sb,
518                 .idx = erofs_blknr(map->m_pa),
519                 .pageofs = map->m_la & ~PAGE_MASK,
520                 .grp_ret = &grp,
521                 .role = &builder->role,
522                 .owned_head = owned_head,
523                 .hosted = &builder->hosted
524         };
525         struct z_erofs_vle_work *work;
526
527         DBG_BUGON(builder->work);
528
529         /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained work */
530         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
531         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
532
533         DBG_BUGON(erofs_blkoff(map->m_pa));
534
535 repeat:
536         work = z_erofs_vle_work_lookup(&finder);
537         if (work) {
538                 unsigned int orig_llen;
539
540                 /* increase workgroup `llen' if needed */
541                 while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
542                        orig_llen != cmpxchg_relaxed(&grp->llen,
543                                                     orig_llen, map->m_llen))
544                         cpu_relax();
545                 goto got_it;
546         }
547
548         work = z_erofs_vle_work_register(&finder, map);
549         if (unlikely(work == ERR_PTR(-EAGAIN)))
550                 goto repeat;
551
552         if (IS_ERR(work))
553                 return PTR_ERR(work);
554 got_it:
555         z_erofs_pagevec_ctor_init(&builder->vector,
556                                   Z_EROFS_VLE_INLINE_PAGEVECS,
557                                   work->pagevec, work->vcnt);
558
559         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
560                 /* enable possibly in-place decompression */
561                 builder->compressed_pages = grp->compressed_pages;
562                 builder->compressed_deficit = clusterpages;
563         } else {
564                 builder->compressed_pages = NULL;
565                 builder->compressed_deficit = 0;
566         }
567
568         builder->grp = grp;
569         builder->work = work;
570         return 0;
571 }
572
573 /*
574  * keep in mind that referenced workgroups will only be freed
575  * after an RCU grace period, so rcu_read_lock() can
576  * prevent a workgroup from being freed.
577  */
578 static void z_erofs_rcu_callback(struct rcu_head *head)
579 {
580         struct z_erofs_vle_work *work = container_of(head,
581                 struct z_erofs_vle_work, rcu);
582         struct z_erofs_vle_workgroup *grp =
583                 z_erofs_vle_work_workgroup(work, true);
584
585         kmem_cache_free(z_erofs_workgroup_cachep, grp);
586 }
587
588 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
589 {
590         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
591                 struct z_erofs_vle_workgroup, obj);
592         struct z_erofs_vle_work *const work = &vgrp->work;
593
594         call_rcu(&work->rcu, z_erofs_rcu_callback);
595 }
596
597 static void
598 __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
599                            struct z_erofs_vle_work *work __maybe_unused)
600 {
601         erofs_workgroup_put(&grp->obj);
602 }
603
604 static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
605 {
606         struct z_erofs_vle_workgroup *grp =
607                 z_erofs_vle_work_workgroup(work, true);
608
609         __z_erofs_vle_work_release(grp, work);
610 }
611
612 static inline bool
613 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
614 {
615         struct z_erofs_vle_work *work = builder->work;
616
617         if (!work)
618                 return false;
619
620         z_erofs_pagevec_ctor_exit(&builder->vector, false);
621         mutex_unlock(&work->lock);
622
623         /*
624          * once all pending pages are added, don't hold the work reference
625          * any longer if the current work isn't hosted by ourselves.
626          */
627         if (!builder->hosted)
628                 __z_erofs_vle_work_release(builder->grp, work);
629
630         builder->work = NULL;
631         builder->grp = NULL;
632         return true;
633 }
634
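/*
 * Staging pages are short-lived scratch pages identified by
 * mapping == Z_EROFS_MAPPING_STAGING; they temporarily back pagevec slots
 * or missing decompressed pages and are recycled into the page pool by
 * z_erofs_gather_if_stagingpage() once decompression finishes.
 */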
635 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
636                                                gfp_t gfp)
637 {
638         struct page *page = erofs_allocpage(pagepool, gfp);
639
640         if (unlikely(!page))
641                 return NULL;
642
643         page->mapping = Z_EROFS_MAPPING_STAGING;
644         return page;
645 }
646
647 struct z_erofs_vle_frontend {
648         struct inode *const inode;
649
650         struct z_erofs_vle_work_builder builder;
651         struct erofs_map_blocks map;
652
653         z_erofs_vle_owned_workgrp_t owned_head;
654
655         /* used for applying cache strategy on the fly */
656         bool backmost;
657         erofs_off_t headoffset;
658 };
659
660 #define VLE_FRONTEND_INIT(__i) { \
661         .inode = __i, \
662         .map = { \
663                 .m_llen = 0, \
664                 .m_plen = 0, \
665                 .mpage = NULL \
666         }, \
667         .builder = VLE_WORK_BUILDER_INIT(), \
668         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
669         .backmost = true, }
670
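/*
 * Decide whether the compressed pages of the extent at `la' should also
 * be added to the managed cache: the backmost extent is always cached,
 * and with EROFS_FS_ZIP_CACHE_LVL >= 2 extents starting before the
 * original read position (la < headoffset) are cached as well.
 */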
671 #ifdef EROFS_FS_HAS_MANAGED_CACHE
672 static inline bool
673 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
674 {
675         if (fe->backmost)
676                 return true;
677
678         if (EROFS_FS_ZIP_CACHE_LVL >= 2)
679                 return la < fe->headoffset;
680
681         return false;
682 }
683 #else
684 static inline bool
685 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
686 {
687         return false;
688 }
689 #endif
690
691 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
692                                 struct page *page,
693                                 struct list_head *page_pool)
694 {
695         struct super_block *const sb = fe->inode->i_sb;
696         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
697         struct erofs_map_blocks *const map = &fe->map;
698         struct z_erofs_vle_work_builder *const builder = &fe->builder;
699         const loff_t offset = page_offset(page);
700
701         bool tight = builder_is_hooked(builder);
702         struct z_erofs_vle_work *work = builder->work;
703
704         enum z_erofs_cache_alloctype cache_strategy;
705         enum z_erofs_page_type page_type;
706         unsigned int cur, end, spiltted, index;
707         int err = 0;
708
709         /* register locked file pages as online pages in pack */
710         z_erofs_onlinepage_init(page);
711
712         spiltted = 0;
713         end = PAGE_SIZE;
714 repeat:
715         cur = end - 1;
716
717         /* lucky, within the range of the current map_blocks */
718         if (offset + cur >= map->m_la &&
719             offset + cur < map->m_la + map->m_llen) {
720                 /* didn't get a valid unzip work previously (very rare) */
721                 if (!builder->work)
722                         goto restart_now;
723                 goto hitted;
724         }
725
726         /* go ahead to the next map_blocks */
727         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
728
729         if (z_erofs_vle_work_iter_end(builder))
730                 fe->backmost = false;
731
732         map->m_la = offset + cur;
733         map->m_llen = 0;
734         err = z_erofs_map_blocks_iter(fe->inode, map, 0);
735         if (unlikely(err))
736                 goto err_out;
737
738 restart_now:
739         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
740                 goto hitted;
741
742         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
743         DBG_BUGON(erofs_blkoff(map->m_pa));
744
745         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
746         if (unlikely(err))
747                 goto err_out;
748
749         /* preload all compressed pages (downgrading the role if necessary) */
750         if (should_alloc_managed_pages(fe, map->m_la))
751                 cache_strategy = DELAYEDALLOC;
752         else
753                 cache_strategy = DONTALLOC;
754
755         preload_compressed_pages(builder, MNGD_MAPPING(sbi),
756                                  map->m_pa / PAGE_SIZE,
757                                  map->m_plen / PAGE_SIZE,
758                                  cache_strategy, page_pool, GFP_KERNEL);
759
760         tight &= builder_is_hooked(builder);
761         work = builder->work;
762 hitted:
763         cur = end - min_t(unsigned int, offset + end - map->m_la, end);
764         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
765                 zero_user_segment(page, cur, end);
766                 goto next_part;
767         }
768
769         /* let's derive page type */
770         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
771                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
772                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
773                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
774
775         if (cur)
776                 tight &= builder_is_followed(builder);
777
778 retry:
779         err = z_erofs_vle_work_add_page(builder, page, page_type);
780         /* should allocate an additional staging page for pagevec */
781         if (err == -EAGAIN) {
782                 struct page *const newpage =
783                         __stagingpage_alloc(page_pool, GFP_NOFS);
784
785                 err = z_erofs_vle_work_add_page(builder, newpage,
786                                                 Z_EROFS_PAGE_TYPE_EXCLUSIVE);
787                 if (likely(!err))
788                         goto retry;
789         }
790
791         if (unlikely(err))
792                 goto err_out;
793
794         index = page->index - map->m_la / PAGE_SIZE;
795
796         /* FIXME! avoid the last redundant fixup & endio */
797         z_erofs_onlinepage_fixup(page, index, true);
798
799         /* bump up the number of split parts of a page */
800         ++spiltted;
801         /* also update nr_pages */
802         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
803 next_part:
804         /* can be used for verification */
805         map->m_llen = offset + cur - map->m_la;
806
807         end = cur;
808         if (end > 0)
809                 goto repeat;
810
811 out:
812         /* FIXME! avoid the last redundant fixup & endio */
813         z_erofs_onlinepage_endio(page);
814
815         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
816                 __func__, page, spiltted, map->m_llen);
817         return err;
818
819         /* if some error occurred while processing this page */
820 err_out:
821         SetPageError(page);
822         goto out;
823 }
824
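/*
 * pending_bios bookkeeping: each completed bio calls this with bios == -1,
 * and after all bios of a jobqueue have been issued, the submitter calls
 * it once more with the total bio count (see z_erofs_vle_submit_all).
 * Once the sum reaches zero, every read has completed: wake up the
 * foreground waiter, or kick the decompression workqueue for background io.
 */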
825 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
826 {
827         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
828         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
829         bool background = tagptr_unfold_tags(t);
830
831         if (!background) {
832                 unsigned long flags;
833
834                 spin_lock_irqsave(&io->u.wait.lock, flags);
835                 if (!atomic_add_return(bios, &io->pending_bios))
836                         wake_up_locked(&io->u.wait);
837                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
838                 return;
839         }
840
841         if (!atomic_add_return(bios, &io->pending_bios))
842                 queue_work(z_erofs_workqueue, &io->u.work);
843 }
844
845 static inline void z_erofs_vle_read_endio(struct bio *bio)
846 {
847         struct erofs_sb_info *sbi = NULL;
848         blk_status_t err = bio->bi_status;
849         struct bio_vec *bvec;
850         struct bvec_iter_all iter_all;
851
852         bio_for_each_segment_all(bvec, bio, iter_all) {
853                 struct page *page = bvec->bv_page;
854                 bool cachemngd = false;
855
856                 DBG_BUGON(PageUptodate(page));
857                 DBG_BUGON(!page->mapping);
858
859                 if (unlikely(!sbi && !z_erofs_is_stagingpage(page))) {
860                         sbi = EROFS_SB(page->mapping->host->i_sb);
861
862                         if (time_to_inject(sbi, FAULT_READ_IO)) {
863                                 erofs_show_injection_info(FAULT_READ_IO);
864                                 err = BLK_STS_IOERR;
865                         }
866                 }
867
868                 /* sbi should already have been fetched if the page is managed */
869                 if (sbi)
870                         cachemngd = erofs_page_is_managed(sbi, page);
871
872                 if (unlikely(err))
873                         SetPageError(page);
874                 else if (cachemngd)
875                         SetPageUptodate(page);
876
877                 if (cachemngd)
878                         unlock_page(page);
879         }
880
881         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
882         bio_put(bio);
883 }
884
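/*
 * z_erofs_vle_unzip() needs an array of `nr_pages' page pointers to collect
 * its decompressed output. Small arrays live on the stack, mid-sized ones
 * may borrow this global buffer (serialized by z_pagemap_global_lock), and
 * larger ones are kvmalloc'ed, falling back to the global buffer again
 * when the allocation fails and the request still fits.
 */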
885 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
886 static DEFINE_MUTEX(z_pagemap_global_lock);
887
888 static int z_erofs_vle_unzip(struct super_block *sb,
889                              struct z_erofs_vle_workgroup *grp,
890                              struct list_head *page_pool)
891 {
892         struct erofs_sb_info *const sbi = EROFS_SB(sb);
893         const unsigned int clusterpages = erofs_clusterpages(sbi);
894
895         struct z_erofs_pagevec_ctor ctor;
896         unsigned int nr_pages;
897         unsigned int sparsemem_pages = 0;
898         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
899         struct page **pages, **compressed_pages, *page;
900         unsigned int i, llen;
901
902         enum z_erofs_page_type page_type;
903         bool overlapped;
904         struct z_erofs_vle_work *work;
905         void *vout;
906         int err;
907
908         might_sleep();
909         work = z_erofs_vle_grab_primary_work(grp);
910         DBG_BUGON(!READ_ONCE(work->nr_pages));
911
912         mutex_lock(&work->lock);
913         nr_pages = work->nr_pages;
914
915         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
916                 pages = pages_onstack;
917         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
918                  mutex_trylock(&z_pagemap_global_lock))
919                 pages = z_pagemap_global;
920         else {
921 repeat:
922                 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
923                                        GFP_KERNEL);
924
925                 /* fallback to global pagemap for the lowmem scenario */
926                 if (unlikely(!pages)) {
927                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
928                                 goto repeat;
929                         else {
930                                 mutex_lock(&z_pagemap_global_lock);
931                                 pages = z_pagemap_global;
932                         }
933                 }
934         }
935
936         for (i = 0; i < nr_pages; ++i)
937                 pages[i] = NULL;
938
939         z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_VLE_INLINE_PAGEVECS,
940                                   work->pagevec, 0);
941
942         for (i = 0; i < work->vcnt; ++i) {
943                 unsigned int pagenr;
944
945                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
946
947                 /* all pages in pagevec ought to be valid */
948                 DBG_BUGON(!page);
949                 DBG_BUGON(!page->mapping);
950
951                 if (z_erofs_gather_if_stagingpage(page_pool, page))
952                         continue;
953
954                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
955                         pagenr = 0;
956                 else
957                         pagenr = z_erofs_onlinepage_index(page);
958
959                 DBG_BUGON(pagenr >= nr_pages);
960                 DBG_BUGON(pages[pagenr]);
961
962                 pages[pagenr] = page;
963         }
964         sparsemem_pages = i;
965
966         z_erofs_pagevec_ctor_exit(&ctor, true);
967
968         overlapped = false;
969         compressed_pages = grp->compressed_pages;
970
971         err = 0;
972         for (i = 0; i < clusterpages; ++i) {
973                 unsigned int pagenr;
974
975                 page = compressed_pages[i];
976
977                 /* all compressed pages ought to be valid */
978                 DBG_BUGON(!page);
979                 DBG_BUGON(!page->mapping);
980
981                 if (!z_erofs_is_stagingpage(page)) {
982                         if (erofs_page_is_managed(sbi, page)) {
983                                 if (unlikely(!PageUptodate(page)))
984                                         err = -EIO;
985                                 continue;
986                         }
987
988                         /*
989                          * only non-head pages can be selected
990                          * for in-place decompression
991                          */
992                         pagenr = z_erofs_onlinepage_index(page);
993
994                         DBG_BUGON(pagenr >= nr_pages);
995                         DBG_BUGON(pages[pagenr]);
996                         ++sparsemem_pages;
997                         pages[pagenr] = page;
998
999                         overlapped = true;
1000                 }
1001
1002                 /* PG_error needs checking for in-place and staging pages */
1003                 if (unlikely(PageError(page))) {
1004                         DBG_BUGON(PageUptodate(page));
1005                         err = -EIO;
1006                 }
1007         }
1008
1009         if (unlikely(err))
1010                 goto out;
1011
1012         llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1013
1014         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
1015                 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
1016                                              pages, nr_pages, work->pageofs);
1017                 goto out;
1018         }
1019
1020         if (llen > grp->llen)
1021                 llen = grp->llen;
1022
1023         err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
1024                                             pages, llen, work->pageofs);
1025         if (err != -ENOTSUPP)
1026                 goto out;
1027
1028         if (sparsemem_pages >= nr_pages)
1029                 goto skip_allocpage;
1030
1031         for (i = 0; i < nr_pages; ++i) {
1032                 if (pages[i])
1033                         continue;
1034
1035                 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
1036         }
1037
1038 skip_allocpage:
1039         vout = erofs_vmap(pages, nr_pages);
1040         if (!vout) {
1041                 err = -ENOMEM;
1042                 goto out;
1043         }
1044
1045         err = z_erofs_vle_unzip_vmap(compressed_pages, clusterpages, vout,
1046                                      llen, work->pageofs, overlapped);
1047
1048         erofs_vunmap(vout, nr_pages);
1049
1050 out:
1051         /* must handle all compressed pages before ending pages */
1052         for (i = 0; i < clusterpages; ++i) {
1053                 page = compressed_pages[i];
1054
1055                 if (erofs_page_is_managed(sbi, page))
1056                         continue;
1057
1058                 /* recycle all individual staging pages */
1059                 (void)z_erofs_gather_if_stagingpage(page_pool, page);
1060
1061                 WRITE_ONCE(compressed_pages[i], NULL);
1062         }
1063
1064         for (i = 0; i < nr_pages; ++i) {
1065                 page = pages[i];
1066                 if (!page)
1067                         continue;
1068
1069                 DBG_BUGON(!page->mapping);
1070
1071                 /* recycle all individual staging pages */
1072                 if (z_erofs_gather_if_stagingpage(page_pool, page))
1073                         continue;
1074
1075                 if (unlikely(err < 0))
1076                         SetPageError(page);
1077
1078                 z_erofs_onlinepage_endio(page);
1079         }
1080
1081         if (pages == z_pagemap_global)
1082                 mutex_unlock(&z_pagemap_global_lock);
1083         else if (unlikely(pages != pages_onstack))
1084                 kvfree(pages);
1085
1086         work->nr_pages = 0;
1087         work->vcnt = 0;
1088
1089         /* all work locks MUST be taken before the following line */
1090
1091         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1092
1093         /* all work locks SHOULD be released right now */
1094         mutex_unlock(&work->lock);
1095
1096         z_erofs_vle_work_release(work);
1097         return err;
1098 }
1099
1100 static void z_erofs_vle_unzip_all(struct super_block *sb,
1101                                   struct z_erofs_vle_unzip_io *io,
1102                                   struct list_head *page_pool)
1103 {
1104         z_erofs_vle_owned_workgrp_t owned = io->head;
1105
1106         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1107                 struct z_erofs_vle_workgroup *grp;
1108
1109                 /* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1110                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1111
1112                 /* impossible that 'owned' equals NULL */
1113                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1114
1115                 grp = container_of(owned, struct z_erofs_vle_workgroup, next);
1116                 owned = READ_ONCE(grp->next);
1117
1118                 z_erofs_vle_unzip(sb, grp, page_pool);
1119         }
1120 }
1121
1122 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1123 {
1124         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1125                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1126         LIST_HEAD(page_pool);
1127
1128         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1129         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1130
1131         put_pages_list(&page_pool);
1132         kvfree(iosb);
1133 }
1134
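/*
 * Pick up a compressed page for bio submission: reuse the page already
 * recorded in compressed_pages[nr] when possible, revalidate a page found
 * in the managed cache (returning NULL when it is already uptodate so no
 * io is needed), or fall back to a freshly allocated staging or
 * cache-managed page.
 */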
1135 static struct page *
1136 pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
1137                            unsigned int nr,
1138                            struct list_head *pagepool,
1139                            struct address_space *mc,
1140                            gfp_t gfp)
1141 {
1142         /* determined at compile time to avoid too many #ifdefs */
1143         const bool nocache = __builtin_constant_p(mc) ? !mc : false;
1144         const pgoff_t index = grp->obj.index;
1145         bool tocache = false;
1146
1147         struct address_space *mapping;
1148         struct page *oldpage, *page;
1149
1150         compressed_page_t t;
1151         int justfound;
1152
1153 repeat:
1154         page = READ_ONCE(grp->compressed_pages[nr]);
1155         oldpage = page;
1156
1157         if (!page)
1158                 goto out_allocpage;
1159
1160         /*
1161          * the cached page has not been allocated and
1162          * a placeholder is out there, prepare it now.
1163          */
1164         if (!nocache && page == PAGE_UNALLOCATED) {
1165                 tocache = true;
1166                 goto out_allocpage;
1167         }
1168
1169         /* process the target tagged pointer */
1170         t = tagptr_init(compressed_page_t, page);
1171         justfound = tagptr_unfold_tags(t);
1172         page = tagptr_unfold_ptr(t);
1173
1174         mapping = READ_ONCE(page->mapping);
1175
1176         /*
1177          * if managed cache is disabled, there is no way to
1178          * get such a cache-like page.
1179          */
1180         if (nocache) {
1181                 /* if managed cache is disabled, `justfound' is impossible */
1182                 DBG_BUGON(justfound);
1183
1184                 /* and it should be locked, not uptodate, and not truncated */
1185                 DBG_BUGON(!PageLocked(page));
1186                 DBG_BUGON(PageUptodate(page));
1187                 DBG_BUGON(!mapping);
1188                 goto out;
1189         }
1190
1191         /*
1192          * unmanaged (file) pages are all firmly locked,
1193          * therefore it is impossible for `mapping' to be NULL.
1194          */
1195         if (mapping && mapping != mc)
1196                 /* ought to be unmanaged pages */
1197                 goto out;
1198
1199         lock_page(page);
1200
1201         /* only true if page reclaim goes wrong, should never happen */
1202         DBG_BUGON(justfound && PagePrivate(page));
1203
1204         /* the page is still in the managed cache */
1205         if (page->mapping == mc) {
1206                 WRITE_ONCE(grp->compressed_pages[nr], page);
1207
1208                 ClearPageError(page);
1209                 if (!PagePrivate(page)) {
1210                         /*
1211                          * under the current restriction, it is impossible
1212                          * to hit !PagePrivate(page) if the page is
1213                          * already in compressed_pages[].
1214                          */
1215                         DBG_BUGON(!justfound);
1216
1217                         justfound = 0;
1218                         set_page_private(page, (unsigned long)grp);
1219                         SetPagePrivate(page);
1220                 }
1221
1222                 /* no need to submit io if it is already up-to-date */
1223                 if (PageUptodate(page)) {
1224                         unlock_page(page);
1225                         page = NULL;
1226                 }
1227                 goto out;
1228         }
1229
1230         /*
1231          * the managed page has been truncated, it's unsafe to
1232          * reuse this one, let's allocate a new cache-managed page.
1233          */
1234         DBG_BUGON(page->mapping);
1235         DBG_BUGON(!justfound);
1236
1237         tocache = true;
1238         unlock_page(page);
1239         put_page(page);
1240 out_allocpage:
1241         page = __stagingpage_alloc(pagepool, gfp);
1242         if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
1243                 list_add(&page->lru, pagepool);
1244                 cpu_relax();
1245                 goto repeat;
1246         }
1247         if (nocache || !tocache)
1248                 goto out;
1249         if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1250                 page->mapping = Z_EROFS_MAPPING_STAGING;
1251                 goto out;
1252         }
1253
1254         set_page_private(page, (unsigned long)grp);
1255         SetPagePrivate(page);
1256 out:    /* the only exit (for tracing and debugging) */
1257         return page;
1258 }
1259
1260 static struct z_erofs_vle_unzip_io *
1261 jobqueue_init(struct super_block *sb,
1262               struct z_erofs_vle_unzip_io *io,
1263               bool foreground)
1264 {
1265         struct z_erofs_vle_unzip_io_sb *iosb;
1266
1267         if (foreground) {
1268                 /* waitqueue available for foreground io */
1269                 DBG_BUGON(!io);
1270
1271                 init_waitqueue_head(&io->u.wait);
1272                 atomic_set(&io->pending_bios, 0);
1273                 goto out;
1274         }
1275
1276         iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1277                         GFP_KERNEL | __GFP_NOFAIL);
1278         DBG_BUGON(!iosb);
1279
1280         /* initialize fields in the allocated descriptor */
1281         io = &iosb->io;
1282         iosb->sb = sb;
1283         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1284 out:
1285         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1286         return io;
1287 }
1288
1289 /* define workgroup jobqueue types */
1290 enum {
1291 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1292         JQ_BYPASS,
1293 #endif
1294         JQ_SUBMIT,
1295         NR_JOBQUEUES,
1296 };
1297
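/*
 * JQ_SUBMIT holds workgroups that still need bios to be issued, while
 * JQ_BYPASS (managed cache only) holds workgroups whose compressed pages
 * are already all uptodate in the cache and therefore need no further
 * reads. Both queues are set up by jobqueueset_init() below.
 */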
1298 static void *jobqueueset_init(struct super_block *sb,
1299                               z_erofs_vle_owned_workgrp_t qtail[],
1300                               struct z_erofs_vle_unzip_io *q[],
1301                               struct z_erofs_vle_unzip_io *fgq,
1302                               bool forcefg)
1303 {
1304 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1305         /*
1306          * if managed cache is enabled, a bypass jobqueue is needed:
1307          * workgroups in this queue need no reads from the device at all.
1308          */
1309         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
1310         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1311 #endif
1312
1313         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
1314         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1315
1316         return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
1317 }
1318
1319 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1320 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1321                                     z_erofs_vle_owned_workgrp_t qtail[],
1322                                     z_erofs_vle_owned_workgrp_t owned_head)
1323 {
1324         z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
1325         z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
1326
1327         DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1328         if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
1329                 owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1330
1331         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1332
1333         WRITE_ONCE(*submit_qtail, owned_head);
1334         WRITE_ONCE(*bypass_qtail, &grp->next);
1335
1336         qtail[JQ_BYPASS] = &grp->next;
1337 }
1338
1339 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1340                                        unsigned int nr_bios,
1341                                        bool force_fg)
1342 {
1343         /*
1344          * although background is preferred, nothing is pending for submission.
1345          * don't queue decompression work; drop the jobqueue directly instead.
1346          */
1347         if (force_fg || nr_bios)
1348                 return false;
1349
1350         kvfree(container_of(q[JQ_SUBMIT],
1351                             struct z_erofs_vle_unzip_io_sb,
1352                             io));
1353         return true;
1354 }
1355 #else
1356 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1357                                     z_erofs_vle_owned_workgrp_t qtail[],
1358                                     z_erofs_vle_owned_workgrp_t owned_head)
1359 {
1360         /* impossible to bypass submission when managed cache is disabled */
1361         DBG_BUGON(1);
1362 }
1363
1364 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1365                                        unsigned int nr_bios,
1366                                        bool force_fg)
1367 {
1368         /* nr_bios should be >0 if managed cache is disabled */
1369         DBG_BUGON(!nr_bios);
1370         return false;
1371 }
1372 #endif
1373
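/*
 * Walk the chain of owned workgroups starting at `owned_head': grab each
 * compressed page, batch physically contiguous pages into as few bios as
 * possible, and sort every workgroup into JQ_SUBMIT or JQ_BYPASS.
 * Returns false only if there is nothing to submit at all.
 */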
1374 static bool z_erofs_vle_submit_all(struct super_block *sb,
1375                                    z_erofs_vle_owned_workgrp_t owned_head,
1376                                    struct list_head *pagepool,
1377                                    struct z_erofs_vle_unzip_io *fgq,
1378                                    bool force_fg)
1379 {
1380         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1381         const unsigned int clusterpages = erofs_clusterpages(sbi);
1382         const gfp_t gfp = GFP_NOFS;
1383
1384         z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
1385         struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
1386         struct bio *bio;
1387         void *bi_private;
1388         /* since bio will be NULL, no need to initialize last_index */
1389         pgoff_t uninitialized_var(last_index);
1390         bool force_submit = false;
1391         unsigned int nr_bios;
1392
1393         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1394                 return false;
1395
1396         force_submit = false;
1397         bio = NULL;
1398         nr_bios = 0;
1399         bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1400
1401         /* by default, all need io submission */
1402         q[JQ_SUBMIT]->head = owned_head;
1403
1404         do {
1405                 struct z_erofs_vle_workgroup *grp;
1406                 pgoff_t first_index;
1407                 struct page *page;
1408                 unsigned int i = 0, bypass = 0;
1409                 int err;
1410
1411                 /* 'owned_head' can never equal the following */
1412                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1413                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1414
1415                 grp = container_of(owned_head,
1416                                    struct z_erofs_vle_workgroup, next);
1417
1418                 /* close the main owned chain first */
1419                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1420                                      Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1421
1422                 first_index = grp->obj.index;
1423                 force_submit |= (first_index != last_index + 1);
1424
1425 repeat:
1426                 page = pickup_page_for_submission(grp, i, pagepool,
1427                                                   MNGD_MAPPING(sbi), gfp);
1428                 if (!page) {
1429                         force_submit = true;
1430                         ++bypass;
1431                         goto skippage;
1432                 }
1433
1434                 if (bio && force_submit) {
1435 submit_bio_retry:
1436                         __submit_bio(bio, REQ_OP_READ, 0);
1437                         bio = NULL;
1438                 }
1439
1440                 if (!bio) {
1441                         bio = erofs_grab_bio(sb, first_index + i,
1442                                              BIO_MAX_PAGES, bi_private,
1443                                              z_erofs_vle_read_endio, true);
1444                         ++nr_bios;
1445                 }
1446
1447                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1448                 if (err < PAGE_SIZE)
1449                         goto submit_bio_retry;
1450
1451                 force_submit = false;
1452                 last_index = first_index + i;
1453 skippage:
1454                 if (++i < clusterpages)
1455                         goto repeat;
1456
1457                 if (bypass < clusterpages)
1458                         qtail[JQ_SUBMIT] = &grp->next;
1459                 else
1460                         move_to_bypass_jobqueue(grp, qtail, owned_head);
1461         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1462
1463         if (bio)
1464                 __submit_bio(bio, REQ_OP_READ, 0);
1465
1466         if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
1467                 return true;
1468
1469         z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
1470         return true;
1471 }
1472
1473 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1474                                      struct list_head *pagepool,
1475                                      bool force_fg)
1476 {
1477         struct super_block *sb = f->inode->i_sb;
1478         struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
1479
1480         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1481                 return;
1482
1483 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1484         z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
1485 #endif
1486         if (!force_fg)
1487                 return;
1488
1489         /* wait until all bios are completed */
1490         wait_event(io[JQ_SUBMIT].u.wait,
1491                    !atomic_read(&io[JQ_SUBMIT].pending_bios));
1492
1493         /* let's do synchronous decompression */
1494         z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
1495 }
1496
1497 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1498                                              struct page *page)
1499 {
1500         struct inode *const inode = page->mapping->host;
1501         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1502         int err;
1503         LIST_HEAD(pagepool);
1504
1505         trace_erofs_readpage(page, false);
1506
1507         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1508
1509         err = z_erofs_do_read_page(&f, page, &pagepool);
1510         (void)z_erofs_vle_work_iter_end(&f.builder);
1511
1512         if (err) {
1513                 errln("%s, failed to read, err [%d]", __func__, err);
1514                 goto out;
1515         }
1516
1517         z_erofs_submit_and_unzip(&f, &pagepool, true);
1518 out:
1519         if (f.map.mpage)
1520                 put_page(f.map.mpage);
1521
1522         /* clean up the remaining free pages */
1523         put_pages_list(&pagepool);
1524         return 0;
1525 }
1526
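     /*
      * .readpages() entry of VLE-compressed inodes: pages are linked in
      * reverse order through page_private() and then walked to build the
      * work chain; decompression is asynchronous unless
      * __should_decompress_synchronously() requests otherwise.
      */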
1527 static int z_erofs_vle_normalaccess_readpages(struct file *filp,
1528                                               struct address_space *mapping,
1529                                               struct list_head *pages,
1530                                               unsigned int nr_pages)
1531 {
1532         struct inode *const inode = mapping->host;
1533         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1534
1535         bool sync = __should_decompress_synchronously(sbi, nr_pages);
1536         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1537         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1538         struct page *head = NULL;
1539         LIST_HEAD(pagepool);
1540
1541         trace_erofs_readpages(mapping->host, lru_to_page(pages),
1542                               nr_pages, false);
1543
1544         f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
1545
1546         for (; nr_pages; --nr_pages) {
1547                 struct page *page = lru_to_page(pages);
1548
1549                 prefetchw(&page->flags);
1550                 list_del(&page->lru);
1551
1552                 /*
1553                  * A pure asynchronous readahead is indicated if
1554                  * a PG_readahead marked page is hit first.
1555                  * Let's also decompress asynchronously in that case.
1556                  */
1557                 sync &= !(PageReadahead(page) && !head);
1558
1559                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1560                         list_add(&page->lru, &pagepool);
1561                         continue;
1562                 }
1563
1564                 set_page_private(page, (unsigned long)head);
1565                 head = page;
1566         }
1567
1568         while (head) {
1569                 struct page *page = head;
1570                 int err;
1571
1572                 /* traversal in reverse order */
1573                 head = (void *)page_private(page);
1574
1575                 err = z_erofs_do_read_page(&f, page, &pagepool);
1576                 if (err) {
1577                         struct erofs_vnode *vi = EROFS_V(inode);
1578
1579                         errln("%s, readahead error at page %lu of nid %llu",
1580                               __func__, page->index, vi->nid);
1581                 }
1582
1583                 put_page(page);
1584         }
1585
1586         (void)z_erofs_vle_work_iter_end(&f.builder);
1587
1588         z_erofs_submit_and_unzip(&f, &pagepool, sync);
1589
1590         if (f.map.mpage)
1591                 put_page(f.map.mpage);
1592
1593         /* clean up the remaining free pages */
1594         put_pages_list(&pagepool);
1595         return 0;
1596 }
1597
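     /* address_space operations for VLE-compressed (normal access) inodes */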
1598 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1599         .readpage = z_erofs_vle_normalaccess_readpage,
1600         .readpages = z_erofs_vle_normalaccess_readpages,
1601 };
1602
1603 /*
1604  * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1605  * ---
1606  * VLE compression mode attempts to compress a variable amount of logical
1607  * data into a fixed-size physical cluster.
1608  * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
1609  */
1610 #define __vle_cluster_advise(x, bit, bits) \
1611         ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
1612
1613 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1614         Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
1615
1616 #define vle_cluster_type(di)    \
1617         __vle_cluster_type((di)->di_advise)
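     /*
      * For instance, assuming Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT == 0 and
      * Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS == 2 in the on-disk definitions,
      * vle_cluster_type() simply takes the two lowest bits of the
      * little-endian di_advise field.
      */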
1618
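     /*
      * Report the in-cluster offset recorded in a decompressed index:
      * PLAIN/HEAD clusters carry it in di_clusterofs, whereas NONHEAD
      * clusters have no extent boundary and report the full clustersize.
      */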
1619 static int
1620 vle_decompressed_index_clusterofs(unsigned int *clusterofs,
1621                                   unsigned int clustersize,
1622                                   struct z_erofs_vle_decompressed_index *di)
1623 {
1624         switch (vle_cluster_type(di)) {
1625         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1626                 *clusterofs = clustersize;
1627                 break;
1628         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1629         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1630                 *clusterofs = le16_to_cpu(di->di_clusterofs);
1631                 break;
1632         default:
1633                 DBG_BUGON(1);
1634                 return -EIO;
1635         }
1636         return 0;
1637 }
1638
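     /*
      * Each decompressed index lives right after the on-disk inode
      * (inode_isize + xattr_isize, aligned) plus the extent header;
      * vle_extent_blkaddr() returns the meta block holding the index of
      * logical cluster 'index', vle_extent_blkoff() the offset within it.
      */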
1639 static inline erofs_blk_t
1640 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1641 {
1642         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1643         struct erofs_vnode *vi = EROFS_V(inode);
1644
1645         unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1646                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1647                 index * sizeof(struct z_erofs_vle_decompressed_index);
1648
1649         return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1650 }
1651
1652 static inline unsigned int
1653 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1654 {
1655         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1656         struct erofs_vnode *vi = EROFS_V(inode);
1657
1658         unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1659                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1660                 index * sizeof(struct z_erofs_vle_decompressed_index);
1661
1662         return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
1663 }
1664
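     /*
      * Bookkeeping shared with vle_get_logical_extent_head() below, mainly
      * to keep the currently mapped extent metapage (and its kmap address)
      * in sync across lookups.
      */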
1665 struct vle_map_blocks_iter_ctx {
1666         struct inode *inode;
1667         struct super_block *sb;
1668         unsigned int clusterbits;
1669
1670         struct page **mpage_ret;
1671         void **kaddr_ret;
1672 };
1673
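     /*
      * Walk back from a (possibly NONHEAD) logical cluster to the HEAD or
      * PLAIN cluster starting its extent, switching the mapped metapage on
      * the way if needed, and return the logical start offset, physical
      * block address and (un)compressed flag of that extent.
      */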
1674 static int
1675 vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
1676                             unsigned int lcn,   /* logical cluster number */
1677                             unsigned long long *ofs,
1678                             erofs_blk_t *pblk,
1679                             unsigned int *flags)
1680 {
1681         const unsigned int clustersize = 1 << ctx->clusterbits;
1682         const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
1683         struct page *mpage = *ctx->mpage_ret;   /* extent metapage */
1684
1685         struct z_erofs_vle_decompressed_index *di;
1686         unsigned int cluster_type, delta0;
1687
1688         if (mpage->index != mblk) {
1689                 kunmap_atomic(*ctx->kaddr_ret);
1690                 unlock_page(mpage);
1691                 put_page(mpage);
1692
1693                 mpage = erofs_get_meta_page(ctx->sb, mblk, false);
1694                 if (IS_ERR(mpage)) {
1695                         *ctx->mpage_ret = NULL;
1696                         return PTR_ERR(mpage);
1697                 }
1698                 *ctx->mpage_ret = mpage;
1699                 *ctx->kaddr_ret = kmap_atomic(mpage);
1700         }
1701
1702         di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);
1703
1704         cluster_type = vle_cluster_type(di);
1705         switch (cluster_type) {
1706         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1707                 delta0 = le16_to_cpu(di->di_u.delta[0]);
1708                 if (unlikely(!delta0 || delta0 > lcn)) {
1709                         errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
1710                               delta0, lcn, EROFS_V(ctx->inode)->nid);
1711                         DBG_BUGON(1);
1712                         return -EIO;
1713                 }
1714                 return vle_get_logical_extent_head(ctx,
1715                         lcn - delta0, ofs, pblk, flags);
1716         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1717                 *flags ^= EROFS_MAP_ZIPPED;
1718                 /* fallthrough */
1719         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1720                 /* clustersize should be a power of two */
1721                 *ofs = ((u64)lcn << ctx->clusterbits) +
1722                         (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
1723                 *pblk = le32_to_cpu(di->di_u.blkaddr);
1724                 break;
1725         default:
1726                 errln("unknown cluster type %u at lcn %u of nid %llu",
1727                       cluster_type, lcn, EROFS_V(ctx->inode)->nid);
1728                 DBG_BUGON(1);
1729                 return -EIO;
1730         }
1731         return 0;
1732 }
1733
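     /*
      * Iteratively map a logical range of a VLE-compressed inode to its
      * physical cluster: an initial call (m_llen == 0) resolves the cluster
      * covering m_la, while later "walking" calls only extend m_llen
      * cluster by cluster.
      */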
1734 int z_erofs_map_blocks_iter(struct inode *inode,
1735                             struct erofs_map_blocks *map,
1736                             int flags)
1737 {
1738         void *kaddr;
1739         const struct vle_map_blocks_iter_ctx ctx = {
1740                 .inode = inode,
1741                 .sb = inode->i_sb,
1742                 .clusterbits = EROFS_I_SB(inode)->clusterbits,
1743                 .mpage_ret = &map->mpage,
1744                 .kaddr_ret = &kaddr
1745         };
1746         const unsigned int clustersize = 1 << ctx.clusterbits;
1747         /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
1748         const bool initial = !map->m_llen;
1749
1750         /* logical extent (start, end) offsets */
1751         unsigned long long ofs, end;
1752         unsigned int lcn;
1753         u32 ofs_rem;
1754
1755         /* initialize `pblk' to silence spurious gcc warnings */
1756         erofs_blk_t mblk, pblk = 0;
1757         struct page *mpage = map->mpage;
1758         struct z_erofs_vle_decompressed_index *di;
1759         unsigned int cluster_type, logical_cluster_ofs;
1760         int err = 0;
1761
1762         trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
1763
1764         /* when trying to read beyond EOF, leave it unmapped */
1765         if (unlikely(map->m_la >= inode->i_size)) {
1766                 DBG_BUGON(!initial);
1767                 map->m_llen = map->m_la + 1 - inode->i_size;
1768                 map->m_la = inode->i_size;
1769                 map->m_flags = 0;
1770                 goto out;
1771         }
1772
1773         debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1774                 map->m_la, map->m_llen);
1775
1776         ofs = map->m_la + map->m_llen;
1777
1778         /* clustersize should be a power of two */
1779         lcn = ofs >> ctx.clusterbits;
1780         ofs_rem = ofs & (clustersize - 1);
1781
1782         mblk = vle_extent_blkaddr(inode, lcn);
1783
1784         if (!mpage || mpage->index != mblk) {
1785                 if (mpage)
1786                         put_page(mpage);
1787
1788                 mpage = erofs_get_meta_page(ctx.sb, mblk, false);
1789                 if (IS_ERR(mpage)) {
1790                         err = PTR_ERR(mpage);
1791                         goto out;
1792                 }
1793                 map->mpage = mpage;
1794         } else {
1795                 lock_page(mpage);
1796                 DBG_BUGON(!PageUptodate(mpage));
1797         }
1798
1799         kaddr = kmap_atomic(mpage);
1800         di = kaddr + vle_extent_blkoff(inode, lcn);
1801
1802         debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
1803                 mblk, vle_extent_blkoff(inode, lcn));
1804
1805         err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
1806                                                 clustersize, di);
1807         if (unlikely(err))
1808                 goto unmap_out;
1809
1810         if (!initial) {
1811                 /* [walking mode] 'map' has already been initialized */
1812                 map->m_llen += logical_cluster_ofs;
1813                 goto unmap_out;
1814         }
1815
1816         /* by default, compressed */
1817         map->m_flags |= EROFS_MAP_ZIPPED;
1818
1819         end = ((u64)lcn + 1) * clustersize;
1820
1821         cluster_type = vle_cluster_type(di);
1822
1823         switch (cluster_type) {
1824         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1825                 if (ofs_rem >= logical_cluster_ofs)
1826                         map->m_flags ^= EROFS_MAP_ZIPPED;
1827                 /* fallthrough */
1828         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1829                 if (ofs_rem == logical_cluster_ofs) {
1830                         pblk = le32_to_cpu(di->di_u.blkaddr);
1831                         goto exact_hitted;
1832                 }
1833
1834                 if (ofs_rem > logical_cluster_ofs) {
1835                         ofs = (u64)lcn * clustersize | logical_cluster_ofs;
1836                         pblk = le32_to_cpu(di->di_u.blkaddr);
1837                         break;
1838                 }
1839
1840                 /* logical cluster number should be >= 1 */
1841                 if (unlikely(!lcn)) {
1842                         errln("invalid logical cluster 0 at nid %llu",
1843                               EROFS_V(inode)->nid);
1844                         err = -EIO;
1845                         goto unmap_out;
1846                 }
1847                 end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
1848                 /* fallthrough */
1849         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1850                 /* get the corresponding first chunk */
1851                 err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
1852                                                   &pblk, &map->m_flags);
1853                 mpage = map->mpage;
1854
1855                 if (unlikely(err)) {
1856                         if (mpage)
1857                                 goto unmap_out;
1858                         goto out;
1859                 }
1860                 break;
1861         default:
1862                 errln("unknown cluster type %u at offset %llu of nid %llu",
1863                       cluster_type, ofs, EROFS_V(inode)->nid);
1864                 err = -EIO;
1865                 goto unmap_out;
1866         }
1867
1868         map->m_la = ofs;
1869 exact_hitted:
1870         map->m_llen = end - ofs;
1871         map->m_plen = clustersize;
1872         map->m_pa = blknr_to_addr(pblk);
1873         map->m_flags |= EROFS_MAP_MAPPED;
1874 unmap_out:
1875         kunmap_atomic(kaddr);
1876         unlock_page(mpage);
1877 out:
1878         debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1879                 __func__, map->m_la, map->m_pa,
1880                 map->m_llen, map->m_plen, map->m_flags);
1881
1882         trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
1883
1884         /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
1885         DBG_BUGON(err < 0 && err != -ENOMEM);
1886         return err;
1887 }
1888