1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
15
16 #include <trace/events/erofs.h>
17
18 /*
19  * a compressed_pages[] placeholder used to prevent the slot from
20  * being filled with file pages for in-place decompression.
21  */
22 #define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
23
24 /* how to allocate cached pages for a workgroup */
25 enum z_erofs_cache_alloctype {
26         DONTALLOC,      /* don't allocate any cached pages */
27         DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
28 };
29
30 /*
31  * tagged pointer with 1-bit tag for all compressed pages
32  * tag 1 - the page is just found with an extra page reference
33  */
34 typedef tagptr1_t compressed_page_t;
35
36 #define tag_compressed_page_justfound(page) \
37         tagptr_fold(compressed_page_t, page, 1)
38
39 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
40 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
41
42 void z_erofs_exit_zip_subsystem(void)
43 {
44         destroy_workqueue(z_erofs_workqueue);
45         kmem_cache_destroy(z_erofs_workgroup_cachep);
46 }
47
48 static inline int init_unzip_workqueue(void)
49 {
50         const unsigned int onlinecpus = num_possible_cpus();
51
52         /*
53          * we don't need too many threads; limiting the number of threads
54          * could improve scheduling performance.
55          */
56         z_erofs_workqueue =
57                 alloc_workqueue("erofs_unzipd",
58                                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
59                                 onlinecpus + onlinecpus / 4);
60
61         return z_erofs_workqueue ? 0 : -ENOMEM;
62 }
63
64 static void init_once(void *ptr)
65 {
66         struct z_erofs_vle_workgroup *grp = ptr;
67         struct z_erofs_vle_work *const work =
68                 z_erofs_vle_grab_primary_work(grp);
69         unsigned int i;
70
71         mutex_init(&work->lock);
72         work->nr_pages = 0;
73         work->vcnt = 0;
74         for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
75                 grp->compressed_pages[i] = NULL;
76 }
77
78 static void init_always(struct z_erofs_vle_workgroup *grp)
79 {
80         struct z_erofs_vle_work *const work =
81                 z_erofs_vle_grab_primary_work(grp);
82
83         atomic_set(&grp->obj.refcount, 1);
84         grp->flags = 0;
85
86         DBG_BUGON(work->nr_pages);
87         DBG_BUGON(work->vcnt);
88 }
89
90 int __init z_erofs_init_zip_subsystem(void)
91 {
92         z_erofs_workgroup_cachep =
93                 kmem_cache_create("erofs_compress",
94                                   Z_EROFS_WORKGROUP_SIZE, 0,
95                                   SLAB_RECLAIM_ACCOUNT, init_once);
96
97         if (z_erofs_workgroup_cachep) {
98                 if (!init_unzip_workqueue())
99                         return 0;
100
101                 kmem_cache_destroy(z_erofs_workgroup_cachep);
102         }
103         return -ENOMEM;
104 }
105
106 enum z_erofs_vle_work_role {
107         Z_EROFS_VLE_WORK_SECONDARY,
108         Z_EROFS_VLE_WORK_PRIMARY,
109         /*
110          * The current work has at least been linked with the following
111          * processed chained works, which means if the processing page
112          * is the tail partial page of the work, the current work can
113          * safely use the whole page, as illustrated below:
114          * +--------------+-------------------------------------------+
115          * |  tail page   |      head page (of the previous work)     |
116          * +--------------+-------------------------------------------+
117          *   /\  which belongs to the current work
118          * [  (*) this page can be used for the current work itself.  ]
119          */
120         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
121         Z_EROFS_VLE_WORK_MAX
122 };
123
124 struct z_erofs_vle_work_builder {
125         enum z_erofs_vle_work_role role;
126         /*
127          * 'hosted = false' means that the current workgroup doesn't belong to
128          * the owned chained workgroups. In other words, it is none of our
129          * business to submit this workgroup.
130          */
131         bool hosted;
132
133         struct z_erofs_vle_workgroup *grp;
134         struct z_erofs_vle_work *work;
135         struct z_erofs_pagevec_ctor vector;
136
137         /* pages used for reading the compressed data */
138         struct page **compressed_pages;
139         unsigned int compressed_deficit;
140 };
141
142 #define VLE_WORK_BUILDER_INIT() \
143         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
144
145 #ifdef EROFS_FS_HAS_MANAGED_CACHE
146 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
147                                      struct address_space *mc,
148                                      pgoff_t index,
149                                      unsigned int clusterpages,
150                                      enum z_erofs_cache_alloctype type,
151                                      struct list_head *pagepool,
152                                      gfp_t gfp)
153 {
154         struct page **const pages = bl->compressed_pages;
155         const unsigned int remaining = bl->compressed_deficit;
156         bool standalone = true;
157         unsigned int i, j = 0;
158
159         if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
160                 return;
161
162         gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
163
164         index += clusterpages - remaining;
165
166         for (i = 0; i < remaining; ++i) {
167                 struct page *page;
168                 compressed_page_t t;
169
170                 /* the compressed page was loaded before */
171                 if (READ_ONCE(pages[i]))
172                         continue;
173
174                 page = find_get_page(mc, index + i);
175
176                 if (page) {
177                         t = tag_compressed_page_justfound(page);
178                 } else if (type == DELAYEDALLOC) {
179                         t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
180                 } else {        /* DONTALLOC */
181                         if (standalone)
182                                 j = i;
183                         standalone = false;
184                         continue;
185                 }
186
187                 if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
188                         continue;
189
190                 if (page)
191                         put_page(page);
192         }
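        /*
         * Note: `j' is the index of the first slot still left for a file
         * page (a DONTALLOC slot with nothing cached), so the window below
         * skips the leading slots that are already settled. If every slot
         * was settled, `standalone' stays true and the role is downgraded
         * to PRIMARY since in-place decompression is no longer needed.
         */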
193         bl->compressed_pages += j;
194         bl->compressed_deficit = remaining - j;
195
196         if (standalone)
197                 bl->role = Z_EROFS_VLE_WORK_PRIMARY;
198 }
199
200 /* called by erofs_shrinker to get rid of all compressed_pages */
201 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
202                                        struct erofs_workgroup *egrp)
203 {
204         struct z_erofs_vle_workgroup *const grp =
205                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
206         struct address_space *const mapping = MNGD_MAPPING(sbi);
207         const int clusterpages = erofs_clusterpages(sbi);
208         int i;
209
210         /*
211          * the refcount of the workgroup is now frozen at 1,
212          * therefore no need to worry about available decompression users.
213          */
214         for (i = 0; i < clusterpages; ++i) {
215                 struct page *page = grp->compressed_pages[i];
216
217                 if (!page || page->mapping != mapping)
218                         continue;
219
220                 /* block other users from reclaiming or migrating the page */
221                 if (!trylock_page(page))
222                         return -EBUSY;
223
224                 /* barrier is implied in the following 'unlock_page' */
225                 WRITE_ONCE(grp->compressed_pages[i], NULL);
226
227                 set_page_private(page, 0);
228                 ClearPagePrivate(page);
229
230                 unlock_page(page);
231                 put_page(page);
232         }
233         return 0;
234 }
235
236 int erofs_try_to_free_cached_page(struct address_space *mapping,
237                                   struct page *page)
238 {
239         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
240         const unsigned int clusterpages = erofs_clusterpages(sbi);
241
242         struct z_erofs_vle_workgroup *grp;
243         int ret = 0;    /* 0 - busy */
244
245         /* prevent the workgroup from being freed */
246         rcu_read_lock();
247         grp = (void *)page_private(page);
248
249         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
250                 unsigned int i;
251
252                 for (i = 0; i < clusterpages; ++i) {
253                         if (grp->compressed_pages[i] == page) {
254                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
255                                 ret = 1;
256                                 break;
257                         }
258                 }
259                 erofs_workgroup_unfreeze(&grp->obj, 1);
260         }
261         rcu_read_unlock();
262
263         if (ret) {
264                 ClearPagePrivate(page);
265                 put_page(page);
266         }
267         return ret;
268 }
269 #else
270 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
271                                      struct address_space *mc,
272                                      pgoff_t index,
273                                      unsigned int clusterpages,
274                                      enum z_erofs_cache_alloctype type,
275                                      struct list_head *pagepool,
276                                      gfp_t gfp)
277 {
278         /* nowhere to load compressed pages from */
279 }
280 #endif
281
282 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
283 static inline bool try_to_reuse_as_compressed_page(
284         struct z_erofs_vle_work_builder *b,
285         struct page *page)
286 {
287         while (b->compressed_deficit) {
288                 --b->compressed_deficit;
289                 if (!cmpxchg(b->compressed_pages++, NULL, page))
290                         return true;
291         }
292
293         return false;
294 }
295
296 /* callers must hold work->lock */
297 static int z_erofs_vle_work_add_page(
298         struct z_erofs_vle_work_builder *builder,
299         struct page *page,
300         enum z_erofs_page_type type)
301 {
302         int ret;
303         bool occupied;
304
305         /* give priority to the compressed data storage */
306         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
307                 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
308                 try_to_reuse_as_compressed_page(builder, page))
309                 return 0;
310
311         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
312                 page, type, &occupied);
313         builder->work->vcnt += (unsigned int)ret;
314
315         return ret ? 0 : -EAGAIN;
316 }
317
318 static inline bool try_to_claim_workgroup(
319         struct z_erofs_vle_workgroup *grp,
320         z_erofs_vle_owned_workgrp_t *owned_head,
321         bool *hosted)
322 {
323         DBG_BUGON(*hosted == true);
324
325         /* let's claim the following types of workgroups */
326 retry:
327         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
328                 /* type 1, nil workgroup */
329                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
330                             *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
331                         goto retry;
332
333                 *owned_head = &grp->next;
334                 *hosted = true;
335         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
336                 /*
337                  * type 2, link to the end of an existing open chain,
338                  * be careful that its submission itself is governed
339                  * by the original owned chain.
340                  */
341                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
342                             *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
343                         goto retry;
344
345                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
346         } else
347                 return false;   /* :( better luck next time */
348
349         return true;    /* lucky, I am the followee :) */
350 }
351
352 struct z_erofs_vle_work_finder {
353         struct super_block *sb;
354         pgoff_t idx;
355         unsigned int pageofs;
356
357         struct z_erofs_vle_workgroup **grp_ret;
358         enum z_erofs_vle_work_role *role;
359         z_erofs_vle_owned_workgrp_t *owned_head;
360         bool *hosted;
361 };
362
363 static struct z_erofs_vle_work *
364 z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
365 {
366         bool tag, primary;
367         struct erofs_workgroup *egrp;
368         struct z_erofs_vle_workgroup *grp;
369         struct z_erofs_vle_work *work;
370
371         egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
372         if (!egrp) {
373                 *f->grp_ret = NULL;
374                 return NULL;
375         }
376
377         grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
378         *f->grp_ret = grp;
379
380         work = z_erofs_vle_grab_work(grp, f->pageofs);
381         /* if multiref is disabled, `primary' is always true */
382         primary = true;
383
384         DBG_BUGON(work->pageofs != f->pageofs);
385
386         /*
387          * lock must be taken first to avoid grp->next == NIL between
388          * claiming workgroup and adding pages:
389          *                        grp->next != NIL
390          *   grp->next = NIL
391          *   mutex_unlock_all
392          *                        mutex_lock(&work->lock)
393          *                        add all pages to pagevec
394          *
395          * [correct locking case 1]:
396          *   mutex_lock(grp->work[a])
397          *   ...
398          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
399          *   ...                          *role = SECONDARY
400          *                                add all pages to pagevec
401          *                                ...
402          *                                mutex_unlock(grp->work[c])
403          *   mutex_lock(grp->work[c])
404          *   ...
405          *   grp->next = NIL
406          *   mutex_unlock_all
407          *
408          * [correct locking case 2]:
409          *   mutex_lock(grp->work[b])
410          *   ...
411          *   mutex_lock(grp->work[a])
412          *   ...
413          *   mutex_lock(grp->work[c])
414          *   ...
415          *   grp->next = NIL
416          *   mutex_unlock_all
417          *                                mutex_lock(grp->work[a])
418          *                                *role = PRIMARY_FOLLOWED
419          *                                add all pages to pagevec
420          *                                ...
421          */
422         mutex_lock(&work->lock);
423
424         *f->hosted = false;
425         if (!primary)
426                 *f->role = Z_EROFS_VLE_WORK_SECONDARY;
427         /* claim the workgroup if possible */
428         else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
429                 *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
430         else
431                 *f->role = Z_EROFS_VLE_WORK_PRIMARY;
432
433         return work;
434 }
435
436 static struct z_erofs_vle_work *
437 z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
438                           struct erofs_map_blocks *map)
439 {
440         bool gnew = false;
441         struct z_erofs_vle_workgroup *grp = *f->grp_ret;
442         struct z_erofs_vle_work *work;
443
444         /* grp must be NULL here since multiref is disabled */
445         if (unlikely(grp)) {
446                 DBG_BUGON(1);
447                 return ERR_PTR(-EINVAL);
448         }
449
450         /* no available workgroup, let's allocate one */
451         grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
452         if (unlikely(!grp))
453                 return ERR_PTR(-ENOMEM);
454
455         init_always(grp);
456         grp->obj.index = f->idx;
457         grp->llen = map->m_llen;
458
459         z_erofs_vle_set_workgrp_fmt(grp,
460                 (map->m_flags & EROFS_MAP_ZIPPED) ?
461                         Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
462                         Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
463
464         /* new workgrps have been claimed as type 1 */
465         WRITE_ONCE(grp->next, *f->owned_head);
466         /* primary and followed work for all new workgrps */
467         *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
468         /* it should be submitted by ourselves */
469         *f->hosted = true;
470
471         gnew = true;
472         work = z_erofs_vle_grab_primary_work(grp);
473         work->pageofs = f->pageofs;
474
475         /*
476          * lock all primary followed works before they become visible to
477          * others; mutex_trylock *never* fails for a new workgroup.
478          */
479         mutex_trylock(&work->lock);
480
481         if (gnew) {
482                 int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
483
484                 if (err) {
485                         mutex_unlock(&work->lock);
486                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
487                         return ERR_PTR(-EAGAIN);
488                 }
489         }
490
491         *f->owned_head = &grp->next;
492         *f->grp_ret = grp;
493         return work;
494 }
495
496 #define builder_is_followed(builder) \
497         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
498
499 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
500                                        struct super_block *sb,
501                                        struct erofs_map_blocks *map,
502                                        z_erofs_vle_owned_workgrp_t *owned_head)
503 {
504         const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
505         struct z_erofs_vle_workgroup *grp;
506         const struct z_erofs_vle_work_finder finder = {
507                 .sb = sb,
508                 .idx = erofs_blknr(map->m_pa),
509                 .pageofs = map->m_la & ~PAGE_MASK,
510                 .grp_ret = &grp,
511                 .role = &builder->role,
512                 .owned_head = owned_head,
513                 .hosted = &builder->hosted
514         };
515         struct z_erofs_vle_work *work;
516
517         DBG_BUGON(builder->work);
518
519         /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained work */
520         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
521         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
522
523         DBG_BUGON(erofs_blkoff(map->m_pa));
524
525 repeat:
526         work = z_erofs_vle_work_lookup(&finder);
527         if (work) {
528                 unsigned int orig_llen;
529
530                 /* increase workgroup `llen' if needed */
531                 while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
532                        orig_llen != cmpxchg_relaxed(&grp->llen,
533                                                     orig_llen, map->m_llen))
534                         cpu_relax();
535                 goto got_it;
536         }
537
538         work = z_erofs_vle_work_register(&finder, map);
539         if (unlikely(work == ERR_PTR(-EAGAIN)))
540                 goto repeat;
541
542         if (unlikely(IS_ERR(work)))
543                 return PTR_ERR(work);
544 got_it:
545         z_erofs_pagevec_ctor_init(&builder->vector,
546                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
547
548         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
549                 /* enable possibly in-place decompression */
550                 builder->compressed_pages = grp->compressed_pages;
551                 builder->compressed_deficit = clusterpages;
552         } else {
553                 builder->compressed_pages = NULL;
554                 builder->compressed_deficit = 0;
555         }
556
557         builder->grp = grp;
558         builder->work = work;
559         return 0;
560 }
561
562 /*
563  * keep in mind that referenced workgroups will be freed only after
564  * an RCU grace period, so rcu_read_lock() can prevent a workgroup
565  * from being freed.
566  */
567 static void z_erofs_rcu_callback(struct rcu_head *head)
568 {
569         struct z_erofs_vle_work *work = container_of(head,
570                 struct z_erofs_vle_work, rcu);
571         struct z_erofs_vle_workgroup *grp =
572                 z_erofs_vle_work_workgroup(work, true);
573
574         kmem_cache_free(z_erofs_workgroup_cachep, grp);
575 }
576
577 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
578 {
579         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
580                 struct z_erofs_vle_workgroup, obj);
581         struct z_erofs_vle_work *const work = &vgrp->work;
582
583         call_rcu(&work->rcu, z_erofs_rcu_callback);
584 }
585
586 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
587         struct z_erofs_vle_work *work __maybe_unused)
588 {
589         erofs_workgroup_put(&grp->obj);
590 }
591
592 void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
593 {
594         struct z_erofs_vle_workgroup *grp =
595                 z_erofs_vle_work_workgroup(work, true);
596
597         __z_erofs_vle_work_release(grp, work);
598 }
599
600 static inline bool
601 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
602 {
603         struct z_erofs_vle_work *work = builder->work;
604
605         if (!work)
606                 return false;
607
608         z_erofs_pagevec_ctor_exit(&builder->vector, false);
609         mutex_unlock(&work->lock);
610
611         /*
612          * all pending pages have been added, so don't hold the work
613          * reference any longer if the current work isn't hosted by ourselves.
614          */
615         if (!builder->hosted)
616                 __z_erofs_vle_work_release(builder->grp, work);
617
618         builder->work = NULL;
619         builder->grp = NULL;
620         return true;
621 }
622
623 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
624                                                gfp_t gfp)
625 {
626         struct page *page = erofs_allocpage(pagepool, gfp);
627
628         if (unlikely(!page))
629                 return NULL;
630
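        /* mark it as a short-lived staging page so that it can be
         * recognized later and recycled back into the page pool */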
631         page->mapping = Z_EROFS_MAPPING_STAGING;
632         return page;
633 }
634
635 struct z_erofs_vle_frontend {
636         struct inode *const inode;
637
638         struct z_erofs_vle_work_builder builder;
639         struct erofs_map_blocks_iter m_iter;
640
641         z_erofs_vle_owned_workgrp_t owned_head;
642
643         /* used for applying cache strategy on the fly */
644         bool backmost;
645         erofs_off_t headoffset;
646 };
647
648 #define VLE_FRONTEND_INIT(__i) { \
649         .inode = __i, \
650         .m_iter = { \
651                 { .m_llen = 0, .m_plen = 0 }, \
652                 .mpage = NULL \
653         }, \
654         .builder = VLE_WORK_BUILDER_INIT(), \
655         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
656         .backmost = true, }
657
658 #ifdef EROFS_FS_HAS_MANAGED_CACHE
659 static inline bool
660 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
661 {
662         if (fe->backmost)
663                 return true;
664
665         if (EROFS_FS_ZIP_CACHE_LVL >= 2)
666                 return la < fe->headoffset;
667
668         return false;
669 }
670 #else
671 static inline bool
672 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
673 {
674         return false;
675 }
676 #endif
677
678 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
679                                 struct page *page,
680                                 struct list_head *page_pool)
681 {
682         struct super_block *const sb = fe->inode->i_sb;
683         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
684         struct erofs_map_blocks_iter *const m = &fe->m_iter;
685         struct erofs_map_blocks *const map = &m->map;
686         struct z_erofs_vle_work_builder *const builder = &fe->builder;
687         const loff_t offset = page_offset(page);
688
689         bool tight = builder_is_followed(builder);
690         struct z_erofs_vle_work *work = builder->work;
691
692         enum z_erofs_cache_alloctype cache_strategy;
693         enum z_erofs_page_type page_type;
694         unsigned int cur, end, spiltted, index;
695         int err = 0;
696
697         /* register locked file pages as online pages in pack */
698         z_erofs_onlinepage_init(page);
699
700         spiltted = 0;
701         end = PAGE_SIZE;
702 repeat:
703         cur = end - 1;
704
705         /* lucky, within the range of the current map_blocks */
706         if (offset + cur >= map->m_la &&
707                 offset + cur < map->m_la + map->m_llen)
708                 goto hitted;
709
710         /* go on to the next map_blocks */
711         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
712
713         if (z_erofs_vle_work_iter_end(builder))
714                 fe->backmost = false;
715
716         map->m_la = offset + cur;
717         map->m_llen = 0;
718         err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
719         if (unlikely(err))
720                 goto err_out;
721
722         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
723                 goto hitted;
724
725         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
726         DBG_BUGON(erofs_blkoff(map->m_pa));
727
728         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
729         if (unlikely(err))
730                 goto err_out;
731
732         /* preload all compressed pages (maybe downgrade role if necessary) */
733         if (should_alloc_managed_pages(fe, map->m_la))
734                 cache_strategy = DELAYEDALLOC;
735         else
736                 cache_strategy = DONTALLOC;
737
738         preload_compressed_pages(builder, MNGD_MAPPING(sbi),
739                                  map->m_pa / PAGE_SIZE,
740                                  map->m_plen / PAGE_SIZE,
741                                  cache_strategy, page_pool, GFP_KERNEL);
742
743         tight &= builder_is_followed(builder);
744         work = builder->work;
745 hitted:
746         cur = end - min_t(unsigned int, offset + end - map->m_la, end);
747         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
748                 zero_user_segment(page, cur, end);
749                 goto next_part;
750         }
751
752         /* let's derive page type */
753         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
754                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
755                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
756                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
757
758 retry:
759         err = z_erofs_vle_work_add_page(builder, page, page_type);
760         /* should allocate an additional staging page for pagevec */
761         if (err == -EAGAIN) {
762                 struct page *const newpage =
763                         __stagingpage_alloc(page_pool, GFP_NOFS);
764
765                 err = z_erofs_vle_work_add_page(builder,
766                         newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
767                 if (likely(!err))
768                         goto retry;
769         }
770
771         if (unlikely(err))
772                 goto err_out;
773
774         index = page->index - map->m_la / PAGE_SIZE;
775
776         /* FIXME! avoid the last redundant fixup & endio */
777         z_erofs_onlinepage_fixup(page, index, true);
778
779         /* bump up the number of split parts of a page */
780         ++spiltted;
781         /* also update nr_pages */
782         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
783 next_part:
784         /* can be used for verification */
785         map->m_llen = offset + cur - map->m_la;
786
787         end = cur;
788         if (end > 0)
789                 goto repeat;
790
791 out:
792         /* FIXME! avoid the last redundant fixup & endio */
793         z_erofs_onlinepage_endio(page);
794
795         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
796                 __func__, page, spiltted, map->m_llen);
797         return err;
798
799         /* if some error occurred while processing this page */
800 err_out:
801         SetPageError(page);
802         goto out;
803 }
804
805 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
806 {
807         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
808         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
809         bool background = tagptr_unfold_tags(t);
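        /*
         * the tag folded into bi_private selects the completion path:
         * background completions queue the decompression work, while
         * foreground callers are woken up on the io waitqueue.
         */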
810
811         if (!background) {
812                 unsigned long flags;
813
814                 spin_lock_irqsave(&io->u.wait.lock, flags);
815                 if (!atomic_add_return(bios, &io->pending_bios))
816                         wake_up_locked(&io->u.wait);
817                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
818                 return;
819         }
820
821         if (!atomic_add_return(bios, &io->pending_bios))
822                 queue_work(z_erofs_workqueue, &io->u.work);
823 }
824
825 static inline void z_erofs_vle_read_endio(struct bio *bio)
826 {
827         const blk_status_t err = bio->bi_status;
828         unsigned int i;
829         struct bio_vec *bvec;
830 #ifdef EROFS_FS_HAS_MANAGED_CACHE
831         struct address_space *mc = NULL;
832 #endif
833
834         bio_for_each_segment_all(bvec, bio, i) {
835                 struct page *page = bvec->bv_page;
836                 bool cachemngd = false;
837
838                 DBG_BUGON(PageUptodate(page));
839                 DBG_BUGON(!page->mapping);
840
841 #ifdef EROFS_FS_HAS_MANAGED_CACHE
842                 if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
843                         struct inode *const inode = page->mapping->host;
844                         struct super_block *const sb = inode->i_sb;
845
846                         mc = MNGD_MAPPING(EROFS_SB(sb));
847                 }
848
849                 /*
850                  * If mc has not been obtained yet, it equals NULL; however,
851                  * page->mapping is never NULL if everything works properly.
852                  */
853                 cachemngd = (page->mapping == mc);
854 #endif
855
856                 if (unlikely(err))
857                         SetPageError(page);
858                 else if (cachemngd)
859                         SetPageUptodate(page);
860
861                 if (cachemngd)
862                         unlock_page(page);
863         }
864
865         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
866         bio_put(bio);
867 }
868
869 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
870 static DEFINE_MUTEX(z_pagemap_global_lock);
871
872 static int z_erofs_vle_unzip(struct super_block *sb,
873         struct z_erofs_vle_workgroup *grp,
874         struct list_head *page_pool)
875 {
876         struct erofs_sb_info *const sbi = EROFS_SB(sb);
877         const unsigned int clusterpages = erofs_clusterpages(sbi);
878
879         struct z_erofs_pagevec_ctor ctor;
880         unsigned int nr_pages;
881         unsigned int sparsemem_pages = 0;
882         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
883         struct page **pages, **compressed_pages, *page;
884         unsigned int i, llen;
885
886         enum z_erofs_page_type page_type;
887         bool overlapped;
888         struct z_erofs_vle_work *work;
889         void *vout;
890         int err;
891
892         might_sleep();
893         work = z_erofs_vle_grab_primary_work(grp);
894         DBG_BUGON(!READ_ONCE(work->nr_pages));
895
896         mutex_lock(&work->lock);
897         nr_pages = work->nr_pages;
898
899         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
900                 pages = pages_onstack;
901         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
902                 mutex_trylock(&z_pagemap_global_lock))
903                 pages = z_pagemap_global;
904         else {
905 repeat:
906                 pages = kvmalloc_array(nr_pages,
907                         sizeof(struct page *), GFP_KERNEL);
908
909                 /* fallback to global pagemap for the lowmem scenario */
910                 if (unlikely(!pages)) {
911                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
912                                 goto repeat;
913                         else {
914                                 mutex_lock(&z_pagemap_global_lock);
915                                 pages = z_pagemap_global;
916                         }
917                 }
918         }
919
920         for (i = 0; i < nr_pages; ++i)
921                 pages[i] = NULL;
922
923         z_erofs_pagevec_ctor_init(&ctor,
924                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
925
926         for (i = 0; i < work->vcnt; ++i) {
927                 unsigned int pagenr;
928
929                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
930
931                 /* all pages in pagevec ought to be valid */
932                 DBG_BUGON(!page);
933                 DBG_BUGON(!page->mapping);
934
935                 if (z_erofs_gather_if_stagingpage(page_pool, page))
936                         continue;
937
938                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
939                         pagenr = 0;
940                 else
941                         pagenr = z_erofs_onlinepage_index(page);
942
943                 DBG_BUGON(pagenr >= nr_pages);
944                 DBG_BUGON(pages[pagenr]);
945
946                 pages[pagenr] = page;
947         }
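        /* sparsemem_pages roughly tracks how many pages[] slots are filled
         * so far (pagevec entries, including staging pages, are counted);
         * it is compared against nr_pages below to decide whether extra
         * staging pages still need to be allocated */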
948         sparsemem_pages = i;
949
950         z_erofs_pagevec_ctor_exit(&ctor, true);
951
952         overlapped = false;
953         compressed_pages = grp->compressed_pages;
954
955         for (i = 0; i < clusterpages; ++i) {
956                 unsigned int pagenr;
957
958                 page = compressed_pages[i];
959
960                 /* all compressed pages ought to be valid */
961                 DBG_BUGON(!page);
962                 DBG_BUGON(!page->mapping);
963
964                 if (z_erofs_is_stagingpage(page))
965                         continue;
966 #ifdef EROFS_FS_HAS_MANAGED_CACHE
967                 if (page->mapping == MNGD_MAPPING(sbi)) {
968                         DBG_BUGON(!PageUptodate(page));
969                         continue;
970                 }
971 #endif
972
973                 /* only non-head pages can be reused as compressed pages */
974                 pagenr = z_erofs_onlinepage_index(page);
975
976                 DBG_BUGON(pagenr >= nr_pages);
977                 DBG_BUGON(pages[pagenr]);
978                 ++sparsemem_pages;
979                 pages[pagenr] = page;
980
981                 overlapped = true;
982         }
983
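        /* upper bound on the decompressed length covered by pages[];
         * it is clamped to the workgroup's actual llen before LZ4
         * decompression below */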
984         llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
985
986         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
987                 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
988                         pages, nr_pages, work->pageofs);
989                 goto out;
990         }
991
992         if (llen > grp->llen)
993                 llen = grp->llen;
994
995         err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
996                 clusterpages, pages, llen, work->pageofs,
997                 z_erofs_onlinepage_endio);
998         if (err != -ENOTSUPP)
999                 goto out_percpu;
1000
1001         if (sparsemem_pages >= nr_pages)
1002                 goto skip_allocpage;
1003
1004         for (i = 0; i < nr_pages; ++i) {
1005                 if (pages[i])
1006                         continue;
1007
1008                 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
1009         }
1010
1011 skip_allocpage:
1012         vout = erofs_vmap(pages, nr_pages);
1013
1014         err = z_erofs_vle_unzip_vmap(compressed_pages,
1015                 clusterpages, vout, llen, work->pageofs, overlapped);
1016
1017         erofs_vunmap(vout, nr_pages);
1018
1019 out:
1020         for (i = 0; i < nr_pages; ++i) {
1021                 page = pages[i];
1022                 DBG_BUGON(!page->mapping);
1023
1024                 /* recycle all individual staging pages */
1025                 if (z_erofs_gather_if_stagingpage(page_pool, page))
1026                         continue;
1027
1028                 if (unlikely(err < 0))
1029                         SetPageError(page);
1030
1031                 z_erofs_onlinepage_endio(page);
1032         }
1033
1034 out_percpu:
1035         for (i = 0; i < clusterpages; ++i) {
1036                 page = compressed_pages[i];
1037
1038 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1039                 if (page->mapping == MNGD_MAPPING(sbi))
1040                         continue;
1041 #endif
1042                 /* recycle all individual staging pages */
1043                 (void)z_erofs_gather_if_stagingpage(page_pool, page);
1044
1045                 WRITE_ONCE(compressed_pages[i], NULL);
1046         }
1047
1048         if (pages == z_pagemap_global)
1049                 mutex_unlock(&z_pagemap_global_lock);
1050         else if (unlikely(pages != pages_onstack))
1051                 kvfree(pages);
1052
1053         work->nr_pages = 0;
1054         work->vcnt = 0;
1055
1056         /* all work locks MUST be taken before the following line */
1057
1058         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1059
1060         /* all work locks SHOULD be released right now */
1061         mutex_unlock(&work->lock);
1062
1063         z_erofs_vle_work_release(work);
1064         return err;
1065 }
1066
1067 static void z_erofs_vle_unzip_all(struct super_block *sb,
1068                                   struct z_erofs_vle_unzip_io *io,
1069                                   struct list_head *page_pool)
1070 {
1071         z_erofs_vle_owned_workgrp_t owned = io->head;
1072
1073         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1074                 struct z_erofs_vle_workgroup *grp;
1075
1076                 /* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1077                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1078
1079                 /* impossible that 'owned' equals NULL */
1080                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1081
1082                 grp = container_of(owned, struct z_erofs_vle_workgroup, next);
1083                 owned = READ_ONCE(grp->next);
1084
1085                 z_erofs_vle_unzip(sb, grp, page_pool);
1086         }
1087 }
1088
1089 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1090 {
1091         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1092                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1093         LIST_HEAD(page_pool);
1094
1095         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1096         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1097
1098         put_pages_list(&page_pool);
1099         kvfree(iosb);
1100 }
1101
1102 static struct page *
1103 pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
1104                            unsigned int nr,
1105                            struct list_head *pagepool,
1106                            struct address_space *mc,
1107                            gfp_t gfp)
1108 {
1109         /* determined at compile time to avoid too many #ifdefs */
1110         const bool nocache = __builtin_constant_p(mc) ? !mc : false;
1111         const pgoff_t index = grp->obj.index;
1112         bool tocache = false;
1113
1114         struct address_space *mapping;
1115         struct page *oldpage, *page;
1116
1117         compressed_page_t t;
1118         int justfound;
1119
1120 repeat:
1121         page = READ_ONCE(grp->compressed_pages[nr]);
1122         oldpage = page;
1123
1124         if (!page)
1125                 goto out_allocpage;
1126
1127         /*
1128          * the cached page has not been allocated and
1129          * a placeholder is out there, prepare it now.
1130          */
1131         if (!nocache && page == PAGE_UNALLOCATED) {
1132                 tocache = true;
1133                 goto out_allocpage;
1134         }
1135
1136         /* process the target tagged pointer */
1137         t = tagptr_init(compressed_page_t, page);
1138         justfound = tagptr_unfold_tags(t);
1139         page = tagptr_unfold_ptr(t);
1140
1141         mapping = READ_ONCE(page->mapping);
1142
1143         /*
1144          * if managed cache is disabled, there's no way to
1145          * get such a cached-like page.
1146          */
1147         if (nocache) {
1148                 /* if managed cache is disabled, `justfound' is impossible */
1149                 DBG_BUGON(justfound);
1150
1151                 /* and it should be locked, not uptodate, and not truncated */
1152                 DBG_BUGON(!PageLocked(page));
1153                 DBG_BUGON(PageUptodate(page));
1154                 DBG_BUGON(!mapping);
1155                 goto out;
1156         }
1157
1158         /*
1159          * unmanaged (file) pages are all locked solidly,
1160          * therefore it is impossible for `mapping' to be NULL.
1161          */
1162         if (mapping && mapping != mc)
1163                 /* ought to be unmanaged pages */
1164                 goto out;
1165
1166         lock_page(page);
1167
1168         /* only true if page reclaim goes wrong, should never happen */
1169         DBG_BUGON(justfound && PagePrivate(page));
1170
1171         /* the page is still in the managed cache */
1172         if (page->mapping == mc) {
1173                 WRITE_ONCE(grp->compressed_pages[nr], page);
1174
1175                 if (!PagePrivate(page)) {
1176                         /*
1177                          * under the current restriction, !PagePrivate(page)
1178                          * is impossible unless the page was just found, since
1179                          * pages already in compressed_pages[] have PG_private set.
1180                          */
1181                         DBG_BUGON(!justfound);
1182
1183                         justfound = 0;
1184                         set_page_private(page, (unsigned long)grp);
1185                         SetPagePrivate(page);
1186                 }
1187
1188                 /* no need to submit io if it is already up-to-date */
1189                 if (PageUptodate(page)) {
1190                         unlock_page(page);
1191                         page = NULL;
1192                 }
1193                 goto out;
1194         }
1195
1196         /*
1197          * the managed page has been truncated, it's unsafe to
1198          * reuse this one, let's allocate a new cache-managed page.
1199          */
1200         DBG_BUGON(page->mapping);
1201         DBG_BUGON(!justfound);
1202
1203         tocache = true;
1204         unlock_page(page);
1205         put_page(page);
1206 out_allocpage:
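        /*
         * allocate a fresh page for this slot; if another thread changed
         * the slot in the meantime (the cmpxchg below fails), give the page
         * back to the pool and retry from the beginning.
         */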
1207         page = __stagingpage_alloc(pagepool, gfp);
1208         if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
1209                 list_add(&page->lru, pagepool);
1210                 cpu_relax();
1211                 goto repeat;
1212         }
1213         if (nocache || !tocache)
1214                 goto out;
1215         if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1216                 page->mapping = Z_EROFS_MAPPING_STAGING;
1217                 goto out;
1218         }
1219
1220         set_page_private(page, (unsigned long)grp);
1221         SetPagePrivate(page);
1222 out:    /* the only exit (for tracing and debugging) */
1223         return page;
1224 }
1225
1226 static struct z_erofs_vle_unzip_io *
1227 jobqueue_init(struct super_block *sb,
1228               struct z_erofs_vle_unzip_io *io,
1229               bool foreground)
1230 {
1231         struct z_erofs_vle_unzip_io_sb *iosb;
1232
1233         if (foreground) {
1234                 /* waitqueue available for foreground io */
1235                 DBG_BUGON(!io);
1236
1237                 init_waitqueue_head(&io->u.wait);
1238                 atomic_set(&io->pending_bios, 0);
1239                 goto out;
1240         }
1241
1242         iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1243                         GFP_KERNEL | __GFP_NOFAIL);
1244         DBG_BUGON(!iosb);
1245
1246         /* initialize fields in the allocated descriptor */
1247         io = &iosb->io;
1248         iosb->sb = sb;
1249         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1250 out:
1251         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1252         return io;
1253 }
1254
1255 /* define workgroup jobqueue types */
1256 enum {
1257 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1258         JQ_BYPASS,
1259 #endif
1260         JQ_SUBMIT,
1261         NR_JOBQUEUES,
1262 };
1263
1264 static void *jobqueueset_init(struct super_block *sb,
1265                               z_erofs_vle_owned_workgrp_t qtail[],
1266                               struct z_erofs_vle_unzip_io *q[],
1267                               struct z_erofs_vle_unzip_io *fgq,
1268                               bool forcefg)
1269 {
1270 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1271         /*
1272          * if managed cache is enabled, a bypass jobqueue is needed:
1273          * no reads from the device are needed for workgroups in this queue.
1274          */
1275         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
1276         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1277 #endif
1278
1279         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
1280         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1281
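        /*
         * the returned tagged pointer becomes bio->bi_private; its 1-bit tag
         * records whether completion should kick off background decompression
         * (see z_erofs_vle_unzip_kickoff) instead of waking a foreground waiter.
         */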
1282         return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
1283 }
1284
1285 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1286 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1287                                     z_erofs_vle_owned_workgrp_t qtail[],
1288                                     z_erofs_vle_owned_workgrp_t owned_head)
1289 {
1290         z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
1291         z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
1292
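        /*
         * this workgroup needs no io submission: append it to the bypass
         * chain and splice the remainder of the submission chain (owned_head)
         * at the submit queue tail so that submission skips it.
         */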
1293         DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1294         if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
1295                 owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1296
1297         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1298
1299         WRITE_ONCE(*submit_qtail, owned_head);
1300         WRITE_ONCE(*bypass_qtail, &grp->next);
1301
1302         qtail[JQ_BYPASS] = &grp->next;
1303 }
1304
1305 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1306                                        unsigned int nr_bios,
1307                                        bool force_fg)
1308 {
1309         /*
1310          * although background decompression is preferred, nothing is pending
1311          * for submission; don't kick off the workqueue, free the jobqueue directly.
1312          */
1313         if (force_fg || nr_bios)
1314                 return false;
1315
1316         kvfree(container_of(q[JQ_SUBMIT],
1317                             struct z_erofs_vle_unzip_io_sb,
1318                             io));
1319         return true;
1320 }
1321 #else
1322 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1323                                     z_erofs_vle_owned_workgrp_t qtail[],
1324                                     z_erofs_vle_owned_workgrp_t owned_head)
1325 {
1326         /* impossible to bypass submission when managed cache is disabled */
1327         DBG_BUGON(1);
1328 }
1329
1330 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1331                                        unsigned int nr_bios,
1332                                        bool force_fg)
1333 {
1334         /* bios should be >0 if managed cache is disabled */
1335         DBG_BUGON(!nr_bios);
1336         return false;
1337 }
1338 #endif
1339
1340 static bool z_erofs_vle_submit_all(struct super_block *sb,
1341                                    z_erofs_vle_owned_workgrp_t owned_head,
1342                                    struct list_head *pagepool,
1343                                    struct z_erofs_vle_unzip_io *fgq,
1344                                    bool force_fg)
1345 {
1346         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1347         const unsigned int clusterpages = erofs_clusterpages(sbi);
1348         const gfp_t gfp = GFP_NOFS;
1349
1350         z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
1351         struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
1352         struct bio *bio;
1353         void *bi_private;
1354         /* since bio will be NULL, no need to initialize last_index */
1355         pgoff_t uninitialized_var(last_index);
1356         bool force_submit = false;
1357         unsigned int nr_bios;
1358
1359         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1360                 return false;
1361
1362         force_submit = false;
1363         bio = NULL;
1364         nr_bios = 0;
1365         bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1366
1367         /* by default, all need io submission */
1368         q[JQ_SUBMIT]->head = owned_head;
1369
1370         do {
1371                 struct z_erofs_vle_workgroup *grp;
1372                 pgoff_t first_index;
1373                 struct page *page;
1374                 unsigned int i = 0, bypass = 0;
1375                 int err;
1376
1377                 /* 'owned_head' can never equal either of the following */
1378                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1379                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1380
1381                 grp = container_of(owned_head,
1382                                    struct z_erofs_vle_workgroup, next);
1383
1384                 /* close the main owned chain at first */
1385                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1386                                      Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1387
1388                 first_index = grp->obj.index;
1389                 force_submit |= (first_index != last_index + 1);
1390
1391 repeat:
1392                 page = pickup_page_for_submission(grp, i, pagepool,
1393                                                   MNGD_MAPPING(sbi), gfp);
1394                 if (!page) {
1395                         force_submit = true;
1396                         ++bypass;
1397                         goto skippage;
1398                 }
1399
1400                 if (bio && force_submit) {
1401 submit_bio_retry:
1402                         __submit_bio(bio, REQ_OP_READ, 0);
1403                         bio = NULL;
1404                 }
1405
1406                 if (!bio) {
1407                         bio = erofs_grab_bio(sb, first_index + i,
1408                                              BIO_MAX_PAGES,
1409                                              z_erofs_vle_read_endio, true);
1410                         bio->bi_private = bi_private;
1411
1412                         ++nr_bios;
1413                 }
1414
1415                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1416                 if (err < PAGE_SIZE)
1417                         goto submit_bio_retry;
1418
1419                 force_submit = false;
1420                 last_index = first_index + i;
1421 skippage:
1422                 if (++i < clusterpages)
1423                         goto repeat;
1424
1425                 if (bypass < clusterpages)
1426                         qtail[JQ_SUBMIT] = &grp->next;
1427                 else
1428                         move_to_bypass_jobqueue(grp, qtail, owned_head);
1429         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1430
1431         if (bio)
1432                 __submit_bio(bio, REQ_OP_READ, 0);
1433
1434         if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
1435                 return true;
1436
1437         z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
1438         return true;
1439 }
1440
1441 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1442                                      struct list_head *pagepool,
1443                                      bool force_fg)
1444 {
1445         struct super_block *sb = f->inode->i_sb;
1446         struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
1447
1448         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1449                 return;
1450
1451 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1452         z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
1453 #endif
1454         if (!force_fg)
1455                 return;
1456
1457         /* wait until all bios are completed */
1458         wait_event(io[JQ_SUBMIT].u.wait,
1459                    !atomic_read(&io[JQ_SUBMIT].pending_bios));
1460
1461         /* let's do synchronous decompression */
1462         z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
1463 }
1464
1465 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1466                                              struct page *page)
1467 {
1468         struct inode *const inode = page->mapping->host;
1469         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1470         int err;
1471         LIST_HEAD(pagepool);
1472
1473         trace_erofs_readpage(page, false);
1474
1475         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1476
1477         err = z_erofs_do_read_page(&f, page, &pagepool);
1478         (void)z_erofs_vle_work_iter_end(&f.builder);
1479
1480         if (err) {
1481                 errln("%s, failed to read, err [%d]", __func__, err);
1482                 goto out;
1483         }
1484
1485         z_erofs_submit_and_unzip(&f, &pagepool, true);
1486 out:
1487         if (f.m_iter.mpage)
1488                 put_page(f.m_iter.mpage);
1489
1490         /* clean up the remaining free pages */
1491         put_pages_list(&pagepool);
1492         return 0;
1493 }
1494
1495 static int z_erofs_vle_normalaccess_readpages(struct file *filp,
1496                                               struct address_space *mapping,
1497                                               struct list_head *pages,
1498                                               unsigned int nr_pages)
1499 {
1500         struct inode *const inode = mapping->host;
1501         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1502
1503         bool sync = __should_decompress_synchronously(sbi, nr_pages);
1504         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1505         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1506         struct page *head = NULL;
1507         LIST_HEAD(pagepool);
1508
1509         trace_erofs_readpages(mapping->host, lru_to_page(pages),
1510                               nr_pages, false);
1511
1512         f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
1513
1514         for (; nr_pages; --nr_pages) {
1515                 struct page *page = lru_to_page(pages);
1516
1517                 prefetchw(&page->flags);
1518                 list_del(&page->lru);
1519
1520                 /*
1521                  * A pure asynchronous readahead is indicated if
1522                  * a PG_readahead marked page is hit first.
1523                  * Let's also do asynchronous decompression for this case.
1524                  */
1525                 sync &= !(PageReadahead(page) && !head);
1526
1527                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1528                         list_add(&page->lru, &pagepool);
1529                         continue;
1530                 }
1531
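                /*
                 * chain the pages into a singly-linked LIFO list through
                 * their page_private fields, so they can be replayed
                 * (newest first) below without any extra allocation.
                 */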
                set_page_private(page, (unsigned long)head);
                head = page;
        }

        while (head) {
                struct page *page = head;
                int err;

                /* traversal in reverse order */
                head = (void *)page_private(page);

                err = z_erofs_do_read_page(&f, page, &pagepool);
                if (err) {
                        struct erofs_vnode *vi = EROFS_V(inode);

                        errln("%s, readahead error at page %lu of nid %llu",
                                __func__, page->index, vi->nid);
                }

                put_page(page);
        }

        (void)z_erofs_vle_work_iter_end(&f.builder);

        z_erofs_submit_and_unzip(&f, &pagepool, sync);

        if (f.m_iter.mpage)
                put_page(f.m_iter.mpage);

        /* clean up the remaining free pages */
        put_pages_list(&pagepool);
        return 0;
}

const struct address_space_operations z_erofs_vle_normalaccess_aops = {
        .readpage = z_erofs_vle_normalaccess_readpage,
        .readpages = z_erofs_vle_normalaccess_readpages,
};

/*
 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
 * ---
 * VLE compression mode attempts to compress a variable amount of logical
 * data into a physical cluster of fixed size.
 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
 */
#define __vle_cluster_advise(x, bit, bits) \
        ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))

#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
        Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)

#define vle_cluster_type(di)    \
        __vle_cluster_type((di)->di_advise)

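/*
 * A worked example of the decode (assuming the values defined elsewhere in
 * the erofs headers: Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT == 0 and
 * Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS == 2): an on-disk di_advise of
 * cpu_to_le16(0x0001) decodes as
 *
 *      vle_cluster_type(di) == (0x0001 >> 0) & 0x3 == 1
 *
 * which corresponds to Z_EROFS_VLE_CLUSTER_TYPE_HEAD if that type is
 * encoded as 1, as the on-disk format headers suggest.
 */
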
static int
vle_decompressed_index_clusterofs(unsigned int *clusterofs,
                                  unsigned int clustersize,
                                  struct z_erofs_vle_decompressed_index *di)
{
        switch (vle_cluster_type(di)) {
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                *clusterofs = clustersize;
                break;
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                *clusterofs = le16_to_cpu(di->di_clusterofs);
                break;
        default:
                DBG_BUGON(1);
                return -EIO;
        }
        return 0;
}

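/*
 * In other words: a NONHEAD cluster belongs entirely to an extent that
 * started in an earlier cluster, hence its clusterofs is reported as a full
 * clustersize; for PLAIN/HEAD clusters a new extent begins within the
 * cluster at di_clusterofs.  z_erofs_map_blocks_iter() below relies on this
 * to extend m_llen cluster by cluster in walking mode.
 */
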
static inline erofs_blk_t
vle_extent_blkaddr(struct inode *inode, pgoff_t index)
{
        struct erofs_sb_info *sbi = EROFS_I_SB(inode);
        struct erofs_vnode *vi = EROFS_V(inode);

        unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
                vi->xattr_isize) + sizeof(struct erofs_extent_header) +
                index * sizeof(struct z_erofs_vle_decompressed_index);

        return erofs_blknr(iloc(sbi, vi->nid) + ofs);
}

static inline unsigned int
vle_extent_blkoff(struct inode *inode, pgoff_t index)
{
        struct erofs_sb_info *sbi = EROFS_I_SB(inode);
        struct erofs_vnode *vi = EROFS_V(inode);

        unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
                vi->xattr_isize) + sizeof(struct erofs_extent_header) +
                index * sizeof(struct z_erofs_vle_decompressed_index);

        return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
}

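/*
 * Both helpers above split the same byte position -- where the index-th
 * decompressed index lives within the inode metadata -- into a block number
 * and an in-block offset.  A rough example (with made-up figures: 4KiB
 * blocks, an 8-byte struct z_erofs_vle_decompressed_index, and an aligned
 * header area ending at byte 256 of the inode): index 600 would sit at byte
 * 256 + 8 * 600 = 5056, i.e. block iloc + 1 at offset 960.
 */
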
struct vle_map_blocks_iter_ctx {
        struct inode *inode;
        struct super_block *sb;
        unsigned int clusterbits;

        struct page **mpage_ret;
        void **kaddr_ret;
};

static int
vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
                            unsigned int lcn,   /* logical cluster number */
                            unsigned long long *ofs,
                            erofs_blk_t *pblk,
                            unsigned int *flags)
{
        const unsigned int clustersize = 1 << ctx->clusterbits;
        const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
        struct page *mpage = *ctx->mpage_ret;   /* extent metapage */

        struct z_erofs_vle_decompressed_index *di;
        unsigned int cluster_type, delta0;

        if (mpage->index != mblk) {
                kunmap_atomic(*ctx->kaddr_ret);
                unlock_page(mpage);
                put_page(mpage);

                mpage = erofs_get_meta_page(ctx->sb, mblk, false);
                if (IS_ERR(mpage)) {
                        *ctx->mpage_ret = NULL;
                        return PTR_ERR(mpage);
                }
                *ctx->mpage_ret = mpage;
                *ctx->kaddr_ret = kmap_atomic(mpage);
        }

        di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);

        cluster_type = vle_cluster_type(di);
        switch (cluster_type) {
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                delta0 = le16_to_cpu(di->di_u.delta[0]);
                if (unlikely(!delta0 || delta0 > lcn)) {
                        errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
                              delta0, lcn, EROFS_V(ctx->inode)->nid);
                        DBG_BUGON(1);
                        return -EIO;
                }
                return vle_get_logical_extent_head(ctx,
                        lcn - delta0, ofs, pblk, flags);
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
                *flags ^= EROFS_MAP_ZIPPED;
                /* fallthrough */
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                /* clustersize should be a power of two */
                *ofs = ((u64)lcn << ctx->clusterbits) +
                        (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
                *pblk = le32_to_cpu(di->di_u.blkaddr);
                break;
        default:
                errln("unknown cluster type %u at lcn %u of nid %llu",
                      cluster_type, lcn, EROFS_V(ctx->inode)->nid);
                DBG_BUGON(1);
                return -EIO;
        }
        return 0;
}

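/*
 * Note that for a NONHEAD index, di_u.delta[0] records how many logical
 * clusters back the owning extent starts, so the recursion above walks
 * toward the corresponding PLAIN/HEAD index; the delta0 sanity check
 * guarantees every step moves strictly backwards and thus terminates.
 */
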
int z_erofs_map_blocks_iter(struct inode *inode,
        struct erofs_map_blocks *map,
        struct page **mpage_ret, int flags)
{
        void *kaddr;
        const struct vle_map_blocks_iter_ctx ctx = {
                .inode = inode,
                .sb = inode->i_sb,
                .clusterbits = EROFS_I_SB(inode)->clusterbits,
                .mpage_ret = mpage_ret,
                .kaddr_ret = &kaddr
        };
        const unsigned int clustersize = 1 << ctx.clusterbits;
        /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
        const bool initial = !map->m_llen;

        /* logical extent (start, end) offset */
        unsigned long long ofs, end;
        unsigned int lcn;
        u32 ofs_rem;

        /* initialize `pblk' to keep gcc from printing foolish warnings */
        erofs_blk_t mblk, pblk = 0;
        struct page *mpage = *mpage_ret;
        struct z_erofs_vle_decompressed_index *di;
        unsigned int cluster_type, logical_cluster_ofs;
        int err = 0;

        trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

        /* when trying to read beyond EOF, leave it unmapped */
        if (unlikely(map->m_la >= inode->i_size)) {
                DBG_BUGON(!initial);
                map->m_llen = map->m_la + 1 - inode->i_size;
                map->m_la = inode->i_size;
                map->m_flags = 0;
                goto out;
        }

        debugln("%s, m_la %llu m_llen %llu --- start", __func__,
                map->m_la, map->m_llen);

        ofs = map->m_la + map->m_llen;

        /* clustersize should be a power of two */
        lcn = ofs >> ctx.clusterbits;
        ofs_rem = ofs & (clustersize - 1);

        mblk = vle_extent_blkaddr(inode, lcn);

        if (!mpage || mpage->index != mblk) {
                if (mpage)
                        put_page(mpage);

                mpage = erofs_get_meta_page(ctx.sb, mblk, false);
                if (IS_ERR(mpage)) {
                        err = PTR_ERR(mpage);
                        goto out;
                }
                *mpage_ret = mpage;
        } else {
                lock_page(mpage);
                DBG_BUGON(!PageUptodate(mpage));
        }

        kaddr = kmap_atomic(mpage);
        di = kaddr + vle_extent_blkoff(inode, lcn);

        debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
                mblk, vle_extent_blkoff(inode, lcn));

        err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
                                                clustersize, di);
        if (unlikely(err))
                goto unmap_out;

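        /*
         * Two calling conventions, as the `initial' flag above suggests:
         * an initial call has m_llen == 0 and fully (re)initializes the
         * map, while a walking call extends an already-mapped extent by
         * the amount reported for the current logical cluster.
         */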
        if (!initial) {
                /* [walking mode] 'map' has already been initialized */
                map->m_llen += logical_cluster_ofs;
                goto unmap_out;
        }

        /* by default, compressed */
        map->m_flags |= EROFS_MAP_ZIPPED;

        end = ((u64)lcn + 1) * clustersize;

        cluster_type = vle_cluster_type(di);

        switch (cluster_type) {
        case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
                if (ofs_rem >= logical_cluster_ofs)
                        map->m_flags ^= EROFS_MAP_ZIPPED;
                /* fallthrough */
        case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
                if (ofs_rem == logical_cluster_ofs) {
                        pblk = le32_to_cpu(di->di_u.blkaddr);
                        goto exact_hitted;
                }

                if (ofs_rem > logical_cluster_ofs) {
                        ofs = (u64)lcn * clustersize | logical_cluster_ofs;
                        pblk = le32_to_cpu(di->di_u.blkaddr);
                        break;
                }

                /* logical cluster number should be >= 1 */
                if (unlikely(!lcn)) {
                        errln("invalid logical cluster 0 at nid %llu",
                                EROFS_V(inode)->nid);
                        err = -EIO;
                        goto unmap_out;
                }
                end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
                /* fallthrough */
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                /* get the corresponding first chunk */
                err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
                                                  &pblk, &map->m_flags);
                mpage = *mpage_ret;

                if (unlikely(err)) {
                        if (mpage)
                                goto unmap_out;
                        goto out;
                }
                break;
        default:
                errln("unknown cluster type %u at offset %llu of nid %llu",
                        cluster_type, ofs, EROFS_V(inode)->nid);
                err = -EIO;
                goto unmap_out;
        }

        map->m_la = ofs;
exact_hitted:
        map->m_llen = end - ofs;
        map->m_plen = clustersize;
        map->m_pa = blknr_to_addr(pblk);
        map->m_flags |= EROFS_MAP_MAPPED;
unmap_out:
        kunmap_atomic(kaddr);
        unlock_page(mpage);
out:
        debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
                __func__, map->m_la, map->m_pa,
                map->m_llen, map->m_plen, map->m_flags);

        trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

        /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
        DBG_BUGON(err < 0 && err != -ENOMEM);
        return err;
}
1856