staging: erofs: fix mis-acted TAIL merging behavior
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
15
16 #include <trace/events/erofs.h>
17
18 /*
19  * a compressed_pages[] placeholder used to prevent these slots from
20  * being filled with file pages for in-place decompression.
21  */
22 #define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
23
24 /* how to allocate cached pages for a workgroup */
25 enum z_erofs_cache_alloctype {
26         DONTALLOC,      /* don't allocate any cached pages */
27         DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
28 };
29
30 /*
31  * tagged pointer with 1-bit tag for all compressed pages
32  * tag 0 - the page is just found with an extra page reference
33  */
34 typedef tagptr1_t compressed_page_t;
35
36 #define tag_compressed_page_justfound(page) \
37         tagptr_fold(compressed_page_t, page, 1)
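
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a "justfound" compressed page is folded into and unfolded from the
 * 1-bit tagged pointer used throughout this file:
 *
 *	compressed_page_t t = tag_compressed_page_justfound(page);
 *	struct page *p = tagptr_unfold_ptr(t);      (the original page pointer)
 *	int justfound = tagptr_unfold_tags(t);      (1 - found with an extra page ref)
 */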
38
39 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
40 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
41
42 void z_erofs_exit_zip_subsystem(void)
43 {
44         destroy_workqueue(z_erofs_workqueue);
45         kmem_cache_destroy(z_erofs_workgroup_cachep);
46 }
47
48 static inline int init_unzip_workqueue(void)
49 {
50         const unsigned int onlinecpus = num_possible_cpus();
51
52         /*
53          * we don't need too many threads; limiting the number of threads
54          * could improve scheduling performance.
55          */
56         z_erofs_workqueue =
57                 alloc_workqueue("erofs_unzipd",
58                                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
59                                 onlinecpus + onlinecpus / 4);
60
61         return z_erofs_workqueue ? 0 : -ENOMEM;
62 }
63
64 static void init_once(void *ptr)
65 {
66         struct z_erofs_vle_workgroup *grp = ptr;
67         struct z_erofs_vle_work *const work =
68                 z_erofs_vle_grab_primary_work(grp);
69         unsigned int i;
70
71         mutex_init(&work->lock);
72         work->nr_pages = 0;
73         work->vcnt = 0;
74         for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
75                 grp->compressed_pages[i] = NULL;
76 }
77
78 static void init_always(struct z_erofs_vle_workgroup *grp)
79 {
80         struct z_erofs_vle_work *const work =
81                 z_erofs_vle_grab_primary_work(grp);
82
83         atomic_set(&grp->obj.refcount, 1);
84         grp->flags = 0;
85
86         DBG_BUGON(work->nr_pages);
87         DBG_BUGON(work->vcnt);
88 }
89
90 int __init z_erofs_init_zip_subsystem(void)
91 {
92         z_erofs_workgroup_cachep =
93                 kmem_cache_create("erofs_compress",
94                                   Z_EROFS_WORKGROUP_SIZE, 0,
95                                   SLAB_RECLAIM_ACCOUNT, init_once);
96
97         if (z_erofs_workgroup_cachep) {
98                 if (!init_unzip_workqueue())
99                         return 0;
100
101                 kmem_cache_destroy(z_erofs_workgroup_cachep);
102         }
103         return -ENOMEM;
104 }
105
106 enum z_erofs_vle_work_role {
107         Z_EROFS_VLE_WORK_SECONDARY,
108         Z_EROFS_VLE_WORK_PRIMARY,
109         /*
110          * The current work was the tail of an existing chain, and the
111          * previously processed chained works have all been hooked up to it.
112          * A new chain should be created for the remaining unprocessed works;
113          * therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
114          * the next work cannot reuse the whole page in the following scenario:
115          *  ________________________________________________________________
116          * |      tail (partial) page     |       head (partial) page       |
117          * |  (belongs to the next work)  |  (belongs to the current work)  |
118          * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
119          */
120         Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
121         /*
122          * The current work has been linked with the processed chained works,
123          * and could also be linked with the potential remaining works, which
124          * means if the processing page is the tail partial page of the work,
125          * the current work can safely use the whole page (since the next work
126          * is under control) for in-place decompression, as illustrated below:
127          *  ________________________________________________________________
128          * |  tail (partial) page  |          head (partial) page           |
129          * | (of the current work) |         (of the previous work)         |
130          * |  PRIMARY_FOLLOWED or  |                                        |
131          * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
132          *
133          * [  (*) the above page can be used for the current work itself.  ]
134          */
135         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
136         Z_EROFS_VLE_WORK_MAX
137 };
138
139 struct z_erofs_vle_work_builder {
140         enum z_erofs_vle_work_role role;
141         /*
142          * 'hosted = false' means that the current workgroup doesn't belong to
143          * the owned chained workgroups. In other words, it is none of our
144          * business to submit this workgroup.
145          */
146         bool hosted;
147
148         struct z_erofs_vle_workgroup *grp;
149         struct z_erofs_vle_work *work;
150         struct z_erofs_pagevec_ctor vector;
151
152         /* pages used for reading the compressed data */
153         struct page **compressed_pages;
154         unsigned int compressed_deficit;
155 };
156
157 #define VLE_WORK_BUILDER_INIT() \
158         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
159
160 #ifdef EROFS_FS_HAS_MANAGED_CACHE
161 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
162                                      struct address_space *mc,
163                                      pgoff_t index,
164                                      unsigned int clusterpages,
165                                      enum z_erofs_cache_alloctype type,
166                                      struct list_head *pagepool,
167                                      gfp_t gfp)
168 {
169         struct page **const pages = bl->compressed_pages;
170         const unsigned int remaining = bl->compressed_deficit;
171         bool standalone = true;
172         unsigned int i, j = 0;
173
174         if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
175                 return;
176
177         gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
178
179         index += clusterpages - remaining;
180
181         for (i = 0; i < remaining; ++i) {
182                 struct page *page;
183                 compressed_page_t t;
184
185                 /* the compressed page was loaded before */
186                 if (READ_ONCE(pages[i]))
187                         continue;
188
189                 page = find_get_page(mc, index + i);
190
191                 if (page) {
192                         t = tag_compressed_page_justfound(page);
193                 } else if (type == DELAYEDALLOC) {
194                         t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
195                 } else {        /* DONTALLOC */
196                         if (standalone)
197                                 j = i;
198                         standalone = false;
199                         continue;
200                 }
201
202                 if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
203                         continue;
204
205                 if (page)
206                         put_page(page);
207         }
208         bl->compressed_pages += j;
209         bl->compressed_deficit = remaining - j;
210
211         if (standalone)
212                 bl->role = Z_EROFS_VLE_WORK_PRIMARY;
213 }
214
215 /* called by erofs_shrinker to get rid of all compressed_pages */
216 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
217                                        struct erofs_workgroup *egrp)
218 {
219         struct z_erofs_vle_workgroup *const grp =
220                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
221         struct address_space *const mapping = MNGD_MAPPING(sbi);
222         const int clusterpages = erofs_clusterpages(sbi);
223         int i;
224
225         /*
226          * the refcount of the workgroup is now frozen at 1,
227          * therefore no need to worry about available decompression users.
228          */
229         for (i = 0; i < clusterpages; ++i) {
230                 struct page *page = grp->compressed_pages[i];
231
232                 if (!page || page->mapping != mapping)
233                         continue;
234
235                 /* block other users from reclaiming or migrating the page */
236                 if (!trylock_page(page))
237                         return -EBUSY;
238
239                 /* barrier is implied in the following 'unlock_page' */
240                 WRITE_ONCE(grp->compressed_pages[i], NULL);
241
242                 set_page_private(page, 0);
243                 ClearPagePrivate(page);
244
245                 unlock_page(page);
246                 put_page(page);
247         }
248         return 0;
249 }
250
251 int erofs_try_to_free_cached_page(struct address_space *mapping,
252                                   struct page *page)
253 {
254         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
255         const unsigned int clusterpages = erofs_clusterpages(sbi);
256         struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
257         int ret = 0;    /* 0 - busy */
258
259         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
260                 unsigned int i;
261
262                 for (i = 0; i < clusterpages; ++i) {
263                         if (grp->compressed_pages[i] == page) {
264                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
265                                 ret = 1;
266                                 break;
267                         }
268                 }
269                 erofs_workgroup_unfreeze(&grp->obj, 1);
270
271                 if (ret) {
272                         ClearPagePrivate(page);
273                         put_page(page);
274                 }
275         }
276         return ret;
277 }
278 #else
279 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
280                                      struct address_space *mc,
281                                      pgoff_t index,
282                                      unsigned int clusterpages,
283                                      enum z_erofs_cache_alloctype type,
284                                      struct list_head *pagepool,
285                                      gfp_t gfp)
286 {
287         /* nowhere to load compressed pages from */
288 }
289 #endif
290
291 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
292 static inline bool try_to_reuse_as_compressed_page(
293         struct z_erofs_vle_work_builder *b,
294         struct page *page)
295 {
296         while (b->compressed_deficit) {
297                 --b->compressed_deficit;
298                 if (!cmpxchg(b->compressed_pages++, NULL, page))
299                         return true;
300         }
301
302         return false;
303 }
304
305 /* callers must be called with work->lock held */
306 static int z_erofs_vle_work_add_page(
307         struct z_erofs_vle_work_builder *builder,
308         struct page *page,
309         enum z_erofs_page_type type)
310 {
311         int ret;
312         bool occupied;
313
314         /* give priority to the compressed data storage */
315         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
316                 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
317                 try_to_reuse_as_compressed_page(builder, page))
318                 return 0;
319
320         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
321                 page, type, &occupied);
322         builder->work->vcnt += (unsigned int)ret;
323
324         return ret ? 0 : -EAGAIN;
325 }
326
327 static enum z_erofs_vle_work_role
328 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
329                        z_erofs_vle_owned_workgrp_t *owned_head,
330                        bool *hosted)
331 {
332         DBG_BUGON(*hosted == true);
333
334         /* let's claim the following types of workgroup */
335 retry:
336         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
337                 /* type 1, nil workgroup */
338                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
339                             *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
340                         goto retry;
341
342                 *owned_head = &grp->next;
343                 *hosted = true;
344                 /* lucky, I am the followee :) */
345                 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
346
347         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
348                 /*
349                  * type 2, link to the end of an existing open chain;
350                  * be careful that its submission itself is governed
351                  * by the original owned chain.
352                  */
353                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
354                             *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
355                         goto retry;
356                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
357                 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
358         }
359
360         return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
361 }
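
/*
 * Editor's summary sketch of the claim outcomes above (illustrative only,
 * not part of the original source):
 *
 *	grp->next on entry   returned role                *owned_head afterwards
 *	NIL      (type 1)    PRIMARY_FOLLOWED             &grp->next
 *	TAIL     (type 2)    PRIMARY_HOOKED               Z_EROFS_VLE_WORKGRP_TAIL
 *	otherwise            PRIMARY (owned by others)    unchanged
 */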
362
363 struct z_erofs_vle_work_finder {
364         struct super_block *sb;
365         pgoff_t idx;
366         unsigned int pageofs;
367
368         struct z_erofs_vle_workgroup **grp_ret;
369         enum z_erofs_vle_work_role *role;
370         z_erofs_vle_owned_workgrp_t *owned_head;
371         bool *hosted;
372 };
373
374 static struct z_erofs_vle_work *
375 z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
376 {
377         bool tag, primary;
378         struct erofs_workgroup *egrp;
379         struct z_erofs_vle_workgroup *grp;
380         struct z_erofs_vle_work *work;
381
382         egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
383         if (!egrp) {
384                 *f->grp_ret = NULL;
385                 return NULL;
386         }
387
388         grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
389         *f->grp_ret = grp;
390
391         work = z_erofs_vle_grab_work(grp, f->pageofs);
392         /* if multiref is disabled, `primary' is always true */
393         primary = true;
394
395         DBG_BUGON(work->pageofs != f->pageofs);
396
397         /*
398          * lock must be taken first to avoid grp->next == NIL between
399          * claiming workgroup and adding pages:
400          *                        grp->next != NIL
401          *   grp->next = NIL
402          *   mutex_unlock_all
403          *                        mutex_lock(&work->lock)
404          *                        add all pages to pagevec
405          *
406          * [correct locking case 1]:
407          *   mutex_lock(grp->work[a])
408          *   ...
409          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
410          *   ...                          *role = SECONDARY
411          *                                add all pages to pagevec
412          *                                ...
413          *                                mutex_unlock(grp->work[c])
414          *   mutex_lock(grp->work[c])
415          *   ...
416          *   grp->next = NIL
417          *   mutex_unlock_all
418          *
419          * [correct locking case 2]:
420          *   mutex_lock(grp->work[b])
421          *   ...
422          *   mutex_lock(grp->work[a])
423          *   ...
424          *   mutex_lock(grp->work[c])
425          *   ...
426          *   grp->next = NIL
427          *   mutex_unlock_all
428          *                                mutex_lock(grp->work[a])
429          *                                *role = PRIMARY_OWNER
430          *                                add all pages to pagevec
431          *                                ...
432          */
433         mutex_lock(&work->lock);
434
435         *f->hosted = false;
436         if (!primary)
437                 *f->role = Z_EROFS_VLE_WORK_SECONDARY;
438         else    /* claim the workgroup if possible */
439                 *f->role = try_to_claim_workgroup(grp, f->owned_head,
440                                                   f->hosted);
441         return work;
442 }
443
444 static struct z_erofs_vle_work *
445 z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
446                           struct erofs_map_blocks *map)
447 {
448         bool gnew = false;
449         struct z_erofs_vle_workgroup *grp = *f->grp_ret;
450         struct z_erofs_vle_work *work;
451
452         /* if multiref is disabled, grp should never be nullptr */
453         if (unlikely(grp)) {
454                 DBG_BUGON(1);
455                 return ERR_PTR(-EINVAL);
456         }
457
458         /* no available workgroup, let's allocate one */
459         grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
460         if (unlikely(!grp))
461                 return ERR_PTR(-ENOMEM);
462
463         init_always(grp);
464         grp->obj.index = f->idx;
465         grp->llen = map->m_llen;
466
467         z_erofs_vle_set_workgrp_fmt(grp,
468                 (map->m_flags & EROFS_MAP_ZIPPED) ?
469                         Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
470                         Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
471
472         /* new workgrps have been claimed as type 1 */
473         WRITE_ONCE(grp->next, *f->owned_head);
474         /* primary and followed work for all new workgrps */
475         *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
476         /* it should be submitted by ourselves */
477         *f->hosted = true;
478
479         gnew = true;
480         work = z_erofs_vle_grab_primary_work(grp);
481         work->pageofs = f->pageofs;
482
483         /*
484          * lock all primary followed works before they become visible to others
485          * and mutex_trylock *never* fails for a new workgroup.
486          */
487         mutex_trylock(&work->lock);
488
489         if (gnew) {
490                 int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
491
492                 if (err) {
493                         mutex_unlock(&work->lock);
494                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
495                         return ERR_PTR(-EAGAIN);
496                 }
497         }
498
499         *f->owned_head = &grp->next;
500         *f->grp_ret = grp;
501         return work;
502 }
503
504 #define builder_is_hooked(builder) \
505         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
506
507 #define builder_is_followed(builder) \
508         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
509
510 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
511                                        struct super_block *sb,
512                                        struct erofs_map_blocks *map,
513                                        z_erofs_vle_owned_workgrp_t *owned_head)
514 {
515         const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
516         struct z_erofs_vle_workgroup *grp;
517         const struct z_erofs_vle_work_finder finder = {
518                 .sb = sb,
519                 .idx = erofs_blknr(map->m_pa),
520                 .pageofs = map->m_la & ~PAGE_MASK,
521                 .grp_ret = &grp,
522                 .role = &builder->role,
523                 .owned_head = owned_head,
524                 .hosted = &builder->hosted
525         };
526         struct z_erofs_vle_work *work;
527
528         DBG_BUGON(builder->work);
529
530         /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained workgroup */
531         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
532         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
533
534         DBG_BUGON(erofs_blkoff(map->m_pa));
535
536 repeat:
537         work = z_erofs_vle_work_lookup(&finder);
538         if (work) {
539                 unsigned int orig_llen;
540
541                 /* increase workgroup `llen' if needed */
542                 while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
543                        orig_llen != cmpxchg_relaxed(&grp->llen,
544                                                     orig_llen, map->m_llen))
545                         cpu_relax();
546                 goto got_it;
547         }
548
549         work = z_erofs_vle_work_register(&finder, map);
550         if (unlikely(work == ERR_PTR(-EAGAIN)))
551                 goto repeat;
552
553         if (IS_ERR(work))
554                 return PTR_ERR(work);
555 got_it:
556         z_erofs_pagevec_ctor_init(&builder->vector,
557                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
558
559         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
560                 /* enable possibly in-place decompression */
561                 builder->compressed_pages = grp->compressed_pages;
562                 builder->compressed_deficit = clusterpages;
563         } else {
564                 builder->compressed_pages = NULL;
565                 builder->compressed_deficit = 0;
566         }
567
568         builder->grp = grp;
569         builder->work = work;
570         return 0;
571 }
572
573 /*
574  * keep in mind that referenced workgroups will only be freed
575  * after an RCU grace period, so rcu_read_lock() can
576  * prevent a workgroup from being freed.
577  */
578 static void z_erofs_rcu_callback(struct rcu_head *head)
579 {
580         struct z_erofs_vle_work *work = container_of(head,
581                 struct z_erofs_vle_work, rcu);
582         struct z_erofs_vle_workgroup *grp =
583                 z_erofs_vle_work_workgroup(work, true);
584
585         kmem_cache_free(z_erofs_workgroup_cachep, grp);
586 }
587
588 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
589 {
590         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
591                 struct z_erofs_vle_workgroup, obj);
592         struct z_erofs_vle_work *const work = &vgrp->work;
593
594         call_rcu(&work->rcu, z_erofs_rcu_callback);
595 }
596
597 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
598         struct z_erofs_vle_work *work __maybe_unused)
599 {
600         erofs_workgroup_put(&grp->obj);
601 }
602
603 static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
604 {
605         struct z_erofs_vle_workgroup *grp =
606                 z_erofs_vle_work_workgroup(work, true);
607
608         __z_erofs_vle_work_release(grp, work);
609 }
610
611 static inline bool
612 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
613 {
614         struct z_erofs_vle_work *work = builder->work;
615
616         if (!work)
617                 return false;
618
619         z_erofs_pagevec_ctor_exit(&builder->vector, false);
620         mutex_unlock(&work->lock);
621
622         /*
623          * if all pending pages are added, don't hold work reference
624          * any longer if the current work isn't hosted by ourselves.
625          */
626         if (!builder->hosted)
627                 __z_erofs_vle_work_release(builder->grp, work);
628
629         builder->work = NULL;
630         builder->grp = NULL;
631         return true;
632 }
633
634 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
635                                                gfp_t gfp)
636 {
637         struct page *page = erofs_allocpage(pagepool, gfp);
638
639         if (unlikely(!page))
640                 return NULL;
641
642         page->mapping = Z_EROFS_MAPPING_STAGING;
643         return page;
644 }
645
646 struct z_erofs_vle_frontend {
647         struct inode *const inode;
648
649         struct z_erofs_vle_work_builder builder;
650         struct erofs_map_blocks map;
651
652         z_erofs_vle_owned_workgrp_t owned_head;
653
654         /* used for applying cache strategy on the fly */
655         bool backmost;
656         erofs_off_t headoffset;
657 };
658
659 #define VLE_FRONTEND_INIT(__i) { \
660         .inode = __i, \
661         .map = { \
662                 .m_llen = 0, \
663                 .m_plen = 0, \
664                 .mpage = NULL \
665         }, \
666         .builder = VLE_WORK_BUILDER_INIT(), \
667         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
668         .backmost = true, }
669
670 #ifdef EROFS_FS_HAS_MANAGED_CACHE
671 static inline bool
672 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
673 {
674         if (fe->backmost)
675                 return true;
676
677         if (EROFS_FS_ZIP_CACHE_LVL >= 2)
678                 return la < fe->headoffset;
679
680         return false;
681 }
682 #else
683 static inline bool
684 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
685 {
686         return false;
687 }
688 #endif
689
690 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
691                                 struct page *page,
692                                 struct list_head *page_pool)
693 {
694         struct super_block *const sb = fe->inode->i_sb;
695         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
696         struct erofs_map_blocks *const map = &fe->map;
697         struct z_erofs_vle_work_builder *const builder = &fe->builder;
698         const loff_t offset = page_offset(page);
699
700         bool tight = builder_is_hooked(builder);
701         struct z_erofs_vle_work *work = builder->work;
702
703         enum z_erofs_cache_alloctype cache_strategy;
704         enum z_erofs_page_type page_type;
705         unsigned int cur, end, spiltted, index;
706         int err = 0;
707
708         /* register locked file pages as online pages in pack */
709         z_erofs_onlinepage_init(page);
710
711         spiltted = 0;
712         end = PAGE_SIZE;
713 repeat:
714         cur = end - 1;
715
716         /* lucky, within the range of the current map_blocks */
717         if (offset + cur >= map->m_la &&
718                 offset + cur < map->m_la + map->m_llen) {
719                 /* didn't get a valid unzip work previously (very rare) */
720                 if (!builder->work)
721                         goto restart_now;
722                 goto hitted;
723         }
724
725         /* go ahead with the next map_blocks */
726         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
727
728         if (z_erofs_vle_work_iter_end(builder))
729                 fe->backmost = false;
730
731         map->m_la = offset + cur;
732         map->m_llen = 0;
733         err = z_erofs_map_blocks_iter(fe->inode, map, 0);
734         if (unlikely(err))
735                 goto err_out;
736
737 restart_now:
738         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
739                 goto hitted;
740
741         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
742         DBG_BUGON(erofs_blkoff(map->m_pa));
743
744         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
745         if (unlikely(err))
746                 goto err_out;
747
748         /* preload all compressed pages (maybe downgrade role if necessary) */
749         if (should_alloc_managed_pages(fe, map->m_la))
750                 cache_strategy = DELAYEDALLOC;
751         else
752                 cache_strategy = DONTALLOC;
753
754         preload_compressed_pages(builder, MNGD_MAPPING(sbi),
755                                  map->m_pa / PAGE_SIZE,
756                                  map->m_plen / PAGE_SIZE,
757                                  cache_strategy, page_pool, GFP_KERNEL);
758
759         tight &= builder_is_hooked(builder);
760         work = builder->work;
761 hitted:
762         cur = end - min_t(unsigned int, offset + end - map->m_la, end);
763         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
764                 zero_user_segment(page, cur, end);
765                 goto next_part;
766         }
767
768         /* let's derive page type */
769         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
770                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
771                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
772                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
773
774         if (cur)
775                 tight &= builder_is_followed(builder);
776
777 retry:
778         err = z_erofs_vle_work_add_page(builder, page, page_type);
779         /* should allocate an additional staging page for pagevec */
780         if (err == -EAGAIN) {
781                 struct page *const newpage =
782                         __stagingpage_alloc(page_pool, GFP_NOFS);
783
784                 err = z_erofs_vle_work_add_page(builder,
785                         newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
786                 if (likely(!err))
787                         goto retry;
788         }
789
790         if (unlikely(err))
791                 goto err_out;
792
793         index = page->index - map->m_la / PAGE_SIZE;
794
795         /* FIXME! avoid the last redundant fixup & endio */
796         z_erofs_onlinepage_fixup(page, index, true);
797
798         /* bump up the number of split parts of a page */
799         ++spiltted;
800         /* also update nr_pages */
801         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
802 next_part:
803         /* can be used for verification */
804         map->m_llen = offset + cur - map->m_la;
805
806         end = cur;
807         if (end > 0)
808                 goto repeat;
809
810 out:
811         /* FIXME! avoid the last redundant fixup & endio */
812         z_erofs_onlinepage_endio(page);
813
814         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
815                 __func__, page, spiltted, map->m_llen);
816         return err;
817
818         /* if some error occurred while processing this page */
819 err_out:
820         SetPageError(page);
821         goto out;
822 }
823
824 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
825 {
826         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
827         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
828         bool background = tagptr_unfold_tags(t);
829
830         if (!background) {
831                 unsigned long flags;
832
833                 spin_lock_irqsave(&io->u.wait.lock, flags);
834                 if (!atomic_add_return(bios, &io->pending_bios))
835                         wake_up_locked(&io->u.wait);
836                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
837                 return;
838         }
839
840         if (!atomic_add_return(bios, &io->pending_bios))
841                 queue_work(z_erofs_workqueue, &io->u.work);
842 }
843
844 static inline void z_erofs_vle_read_endio(struct bio *bio)
845 {
846         const blk_status_t err = bio->bi_status;
847         unsigned int i;
848         struct bio_vec *bvec;
849 #ifdef EROFS_FS_HAS_MANAGED_CACHE
850         struct address_space *mc = NULL;
851 #endif
852
853         bio_for_each_segment_all(bvec, bio, i) {
854                 struct page *page = bvec->bv_page;
855                 bool cachemngd = false;
856
857                 DBG_BUGON(PageUptodate(page));
858                 DBG_BUGON(!page->mapping);
859
860 #ifdef EROFS_FS_HAS_MANAGED_CACHE
861                 if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
862                         struct inode *const inode = page->mapping->host;
863                         struct super_block *const sb = inode->i_sb;
864
865                         mc = MNGD_MAPPING(EROFS_SB(sb));
866                 }
867
868                 /*
869                  * If mc has not been obtained yet, it equals NULL;
870                  * however, page->mapping is never NULL if everything works properly.
871                  */
872                 cachemngd = (page->mapping == mc);
873 #endif
874
875                 if (unlikely(err))
876                         SetPageError(page);
877                 else if (cachemngd)
878                         SetPageUptodate(page);
879
880                 if (cachemngd)
881                         unlock_page(page);
882         }
883
884         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
885         bio_put(bio);
886 }
887
888 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
889 static DEFINE_MUTEX(z_pagemap_global_lock);
890
891 static int z_erofs_vle_unzip(struct super_block *sb,
892         struct z_erofs_vle_workgroup *grp,
893         struct list_head *page_pool)
894 {
895         struct erofs_sb_info *const sbi = EROFS_SB(sb);
896         const unsigned int clusterpages = erofs_clusterpages(sbi);
897
898         struct z_erofs_pagevec_ctor ctor;
899         unsigned int nr_pages;
900         unsigned int sparsemem_pages = 0;
901         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
902         struct page **pages, **compressed_pages, *page;
903         unsigned int i, llen;
904
905         enum z_erofs_page_type page_type;
906         bool overlapped;
907         struct z_erofs_vle_work *work;
908         void *vout;
909         int err;
910
911         might_sleep();
912         work = z_erofs_vle_grab_primary_work(grp);
913         DBG_BUGON(!READ_ONCE(work->nr_pages));
914
915         mutex_lock(&work->lock);
916         nr_pages = work->nr_pages;
917
918         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
919                 pages = pages_onstack;
920         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
921                 mutex_trylock(&z_pagemap_global_lock))
922                 pages = z_pagemap_global;
923         else {
924 repeat:
925                 pages = kvmalloc_array(nr_pages,
926                         sizeof(struct page *), GFP_KERNEL);
927
928                 /* fallback to global pagemap for the lowmem scenario */
929                 if (unlikely(!pages)) {
930                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
931                                 goto repeat;
932                         else {
933                                 mutex_lock(&z_pagemap_global_lock);
934                                 pages = z_pagemap_global;
935                         }
936                 }
937         }
938
939         for (i = 0; i < nr_pages; ++i)
940                 pages[i] = NULL;
941
942         z_erofs_pagevec_ctor_init(&ctor,
943                 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
944
945         for (i = 0; i < work->vcnt; ++i) {
946                 unsigned int pagenr;
947
948                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
949
950                 /* all pages in pagevec ought to be valid */
951                 DBG_BUGON(!page);
952                 DBG_BUGON(!page->mapping);
953
954                 if (z_erofs_gather_if_stagingpage(page_pool, page))
955                         continue;
956
957                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
958                         pagenr = 0;
959                 else
960                         pagenr = z_erofs_onlinepage_index(page);
961
962                 DBG_BUGON(pagenr >= nr_pages);
963                 DBG_BUGON(pages[pagenr]);
964
965                 pages[pagenr] = page;
966         }
967         sparsemem_pages = i;
968
969         z_erofs_pagevec_ctor_exit(&ctor, true);
970
971         overlapped = false;
972         compressed_pages = grp->compressed_pages;
973
974         for (i = 0; i < clusterpages; ++i) {
975                 unsigned int pagenr;
976
977                 page = compressed_pages[i];
978
979                 /* all compressed pages ought to be valid */
980                 DBG_BUGON(!page);
981                 DBG_BUGON(!page->mapping);
982
983                 if (z_erofs_is_stagingpage(page))
984                         continue;
985 #ifdef EROFS_FS_HAS_MANAGED_CACHE
986                 if (page->mapping == MNGD_MAPPING(sbi)) {
987                         DBG_BUGON(!PageUptodate(page));
988                         continue;
989                 }
990 #endif
991
992                 /* only non-head page could be reused as a compressed page */
993                 pagenr = z_erofs_onlinepage_index(page);
994
995                 DBG_BUGON(pagenr >= nr_pages);
996                 DBG_BUGON(pages[pagenr]);
997                 ++sparsemem_pages;
998                 pages[pagenr] = page;
999
1000                 overlapped = true;
1001         }
1002
1003         llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1004
1005         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
1006                 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
1007                         pages, nr_pages, work->pageofs);
1008                 goto out;
1009         }
1010
1011         if (llen > grp->llen)
1012                 llen = grp->llen;
1013
1014         err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
1015                                             pages, llen, work->pageofs);
1016         if (err != -ENOTSUPP)
1017                 goto out;
1018
1019         if (sparsemem_pages >= nr_pages)
1020                 goto skip_allocpage;
1021
1022         for (i = 0; i < nr_pages; ++i) {
1023                 if (pages[i])
1024                         continue;
1025
1026                 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
1027         }
1028
1029 skip_allocpage:
1030         vout = erofs_vmap(pages, nr_pages);
1031
1032         err = z_erofs_vle_unzip_vmap(compressed_pages,
1033                 clusterpages, vout, llen, work->pageofs, overlapped);
1034
1035         erofs_vunmap(vout, nr_pages);
1036
1037 out:
1038         /* must handle all compressed pages before ending pages */
1039         for (i = 0; i < clusterpages; ++i) {
1040                 page = compressed_pages[i];
1041
1042 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1043                 if (page->mapping == MNGD_MAPPING(sbi))
1044                         continue;
1045 #endif
1046                 /* recycle all individual staging pages */
1047                 (void)z_erofs_gather_if_stagingpage(page_pool, page);
1048
1049                 WRITE_ONCE(compressed_pages[i], NULL);
1050         }
1051
1052         for (i = 0; i < nr_pages; ++i) {
1053                 page = pages[i];
1054                 if (!page)
1055                         continue;
1056
1057                 DBG_BUGON(!page->mapping);
1058
1059                 /* recycle all individual staging pages */
1060                 if (z_erofs_gather_if_stagingpage(page_pool, page))
1061                         continue;
1062
1063                 if (unlikely(err < 0))
1064                         SetPageError(page);
1065
1066                 z_erofs_onlinepage_endio(page);
1067         }
1068
1069         if (pages == z_pagemap_global)
1070                 mutex_unlock(&z_pagemap_global_lock);
1071         else if (unlikely(pages != pages_onstack))
1072                 kvfree(pages);
1073
1074         work->nr_pages = 0;
1075         work->vcnt = 0;
1076
1077         /* all work locks MUST be taken before the following line */
1078
1079         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1080
1081         /* all work locks SHOULD be released right now */
1082         mutex_unlock(&work->lock);
1083
1084         z_erofs_vle_work_release(work);
1085         return err;
1086 }
1087
1088 static void z_erofs_vle_unzip_all(struct super_block *sb,
1089                                   struct z_erofs_vle_unzip_io *io,
1090                                   struct list_head *page_pool)
1091 {
1092         z_erofs_vle_owned_workgrp_t owned = io->head;
1093
1094         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1095                 struct z_erofs_vle_workgroup *grp;
1096
1097                 /* it is impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1098                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1099
1100                 /* it is impossible that 'owned' equals NULL */
1101                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1102
1103                 grp = container_of(owned, struct z_erofs_vle_workgroup, next);
1104                 owned = READ_ONCE(grp->next);
1105
1106                 z_erofs_vle_unzip(sb, grp, page_pool);
1107         }
1108 }
1109
1110 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1111 {
1112         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1113                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1114         LIST_HEAD(page_pool);
1115
1116         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1117         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1118
1119         put_pages_list(&page_pool);
1120         kvfree(iosb);
1121 }
1122
1123 static struct page *
1124 pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
1125                            unsigned int nr,
1126                            struct list_head *pagepool,
1127                            struct address_space *mc,
1128                            gfp_t gfp)
1129 {
1130         /* determined at compile time to avoid too many #ifdefs */
1131         const bool nocache = __builtin_constant_p(mc) ? !mc : false;
1132         const pgoff_t index = grp->obj.index;
1133         bool tocache = false;
1134
1135         struct address_space *mapping;
1136         struct page *oldpage, *page;
1137
1138         compressed_page_t t;
1139         int justfound;
1140
1141 repeat:
1142         page = READ_ONCE(grp->compressed_pages[nr]);
1143         oldpage = page;
1144
1145         if (!page)
1146                 goto out_allocpage;
1147
1148         /*
1149          * the cached page has not been allocated and
1150          * a placeholder is out there, prepare it now.
1151          */
1152         if (!nocache && page == PAGE_UNALLOCATED) {
1153                 tocache = true;
1154                 goto out_allocpage;
1155         }
1156
1157         /* process the target tagged pointer */
1158         t = tagptr_init(compressed_page_t, page);
1159         justfound = tagptr_unfold_tags(t);
1160         page = tagptr_unfold_ptr(t);
1161
1162         mapping = READ_ONCE(page->mapping);
1163
1164         /*
1165          * if managed cache is disabled, there is no way to
1166          * get such a cached-like page.
1167          */
1168         if (nocache) {
1169                 /* `justfound' is impossible if managed cache is disabled */
1170                 DBG_BUGON(justfound);
1171
1172                 /* and it should be locked, not uptodate, and not truncated */
1173                 DBG_BUGON(!PageLocked(page));
1174                 DBG_BUGON(PageUptodate(page));
1175                 DBG_BUGON(!mapping);
1176                 goto out;
1177         }
1178
1179         /*
1180          * unmanaged (file) pages are all locked solidly,
1181          * therefore it is impossible for `mapping' to be NULL.
1182          */
1183         if (mapping && mapping != mc)
1184                 /* ought to be unmanaged pages */
1185                 goto out;
1186
1187         lock_page(page);
1188
1189         /* only true if page reclaim goes wrong, should never happen */
1190         DBG_BUGON(justfound && PagePrivate(page));
1191
1192         /* the page is still in the managed cache */
1193         if (page->mapping == mc) {
1194                 WRITE_ONCE(grp->compressed_pages[nr], page);
1195
1196                 if (!PagePrivate(page)) {
1197                         /*
1198                          * under the current restriction, the page cannot be
1199                          * !PagePrivate(page) if it is already in
1200                          * compressed_pages[], so it must be `justfound'.
1201                          */
1202                         DBG_BUGON(!justfound);
1203
1204                         justfound = 0;
1205                         set_page_private(page, (unsigned long)grp);
1206                         SetPagePrivate(page);
1207                 }
1208
1209                 /* no need to submit io if it is already up-to-date */
1210                 if (PageUptodate(page)) {
1211                         unlock_page(page);
1212                         page = NULL;
1213                 }
1214                 goto out;
1215         }
1216
1217         /*
1218          * the managed page has been truncated; it's unsafe to
1219          * reuse this one, so let's allocate a new cache-managed page.
1220          */
1221         DBG_BUGON(page->mapping);
1222         DBG_BUGON(!justfound);
1223
1224         tocache = true;
1225         unlock_page(page);
1226         put_page(page);
1227 out_allocpage:
1228         page = __stagingpage_alloc(pagepool, gfp);
1229         if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
1230                 list_add(&page->lru, pagepool);
1231                 cpu_relax();
1232                 goto repeat;
1233         }
1234         if (nocache || !tocache)
1235                 goto out;
1236         if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1237                 page->mapping = Z_EROFS_MAPPING_STAGING;
1238                 goto out;
1239         }
1240
1241         set_page_private(page, (unsigned long)grp);
1242         SetPagePrivate(page);
1243 out:    /* the only exit (for tracing and debugging) */
1244         return page;
1245 }
1246
1247 static struct z_erofs_vle_unzip_io *
1248 jobqueue_init(struct super_block *sb,
1249               struct z_erofs_vle_unzip_io *io,
1250               bool foreground)
1251 {
1252         struct z_erofs_vle_unzip_io_sb *iosb;
1253
1254         if (foreground) {
1255                 /* waitqueue available for foreground io */
1256                 DBG_BUGON(!io);
1257
1258                 init_waitqueue_head(&io->u.wait);
1259                 atomic_set(&io->pending_bios, 0);
1260                 goto out;
1261         }
1262
1263         iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1264                         GFP_KERNEL | __GFP_NOFAIL);
1265         DBG_BUGON(!iosb);
1266
1267         /* initialize fields in the allocated descriptor */
1268         io = &iosb->io;
1269         iosb->sb = sb;
1270         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1271 out:
1272         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1273         return io;
1274 }
1275
1276 /* define workgroup jobqueue types */
1277 enum {
1278 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1279         JQ_BYPASS,
1280 #endif
1281         JQ_SUBMIT,
1282         NR_JOBQUEUES,
1283 };
1284
1285 static void *jobqueueset_init(struct super_block *sb,
1286                               z_erofs_vle_owned_workgrp_t qtail[],
1287                               struct z_erofs_vle_unzip_io *q[],
1288                               struct z_erofs_vle_unzip_io *fgq,
1289                               bool forcefg)
1290 {
1291 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1292         /*
1293          * if managed cache is enabled, a bypass jobqueue is needed;
1294          * there is no need to read from the device for workgroups in this queue.
1295          */
1296         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
1297         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1298 #endif
1299
1300         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
1301         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1302
1303         return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
1304 }
1305
1306 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1307 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1308                                     z_erofs_vle_owned_workgrp_t qtail[],
1309                                     z_erofs_vle_owned_workgrp_t owned_head)
1310 {
1311         z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
1312         z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
1313
1314         DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1315         if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
1316                 owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1317
1318         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1319
1320         WRITE_ONCE(*submit_qtail, owned_head);
1321         WRITE_ONCE(*bypass_qtail, &grp->next);
1322
1323         qtail[JQ_BYPASS] = &grp->next;
1324 }
1325
1326 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1327                                        unsigned int nr_bios,
1328                                        bool force_fg)
1329 {
1330         /*
1331          * although background decompression is preferred, nothing is pending
1332          * for submission; don't kick the workqueue, just drop the jobqueue directly.
1333          */
1334         if (force_fg || nr_bios)
1335                 return false;
1336
1337         kvfree(container_of(q[JQ_SUBMIT],
1338                             struct z_erofs_vle_unzip_io_sb,
1339                             io));
1340         return true;
1341 }
1342 #else
1343 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1344                                     z_erofs_vle_owned_workgrp_t qtail[],
1345                                     z_erofs_vle_owned_workgrp_t owned_head)
1346 {
1347         /* impossible to bypass submission when managed cache is disabled */
1348         DBG_BUGON(1);
1349 }
1350
1351 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1352                                        unsigned int nr_bios,
1353                                        bool force_fg)
1354 {
1355         /* bios should be >0 if managed cache is disabled */
1356         DBG_BUGON(!nr_bios);
1357         return false;
1358 }
1359 #endif
1360
1361 static bool z_erofs_vle_submit_all(struct super_block *sb,
1362                                    z_erofs_vle_owned_workgrp_t owned_head,
1363                                    struct list_head *pagepool,
1364                                    struct z_erofs_vle_unzip_io *fgq,
1365                                    bool force_fg)
1366 {
1367         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1368         const unsigned int clusterpages = erofs_clusterpages(sbi);
1369         const gfp_t gfp = GFP_NOFS;
1370
1371         z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
1372         struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
1373         struct bio *bio;
1374         void *bi_private;
1375         /* since bio will be NULL, no need to initialize last_index */
1376         pgoff_t uninitialized_var(last_index);
1377         bool force_submit = false;
1378         unsigned int nr_bios;
1379
1380         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1381                 return false;
1382
1383         force_submit = false;
1384         bio = NULL;
1385         nr_bios = 0;
1386         bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1387
1388         /* by default, all need io submission */
1389         q[JQ_SUBMIT]->head = owned_head;
1390
1391         do {
1392                 struct z_erofs_vle_workgroup *grp;
1393                 pgoff_t first_index;
1394                 struct page *page;
1395                 unsigned int i = 0, bypass = 0;
1396                 int err;
1397
1398                 /* it is impossible that 'owned_head' equals the following */
1399                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1400                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1401
1402                 grp = container_of(owned_head,
1403                                    struct z_erofs_vle_workgroup, next);
1404
1405                 /* close the main owned chain at first */
1406                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1407                                      Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1408
1409                 first_index = grp->obj.index;
1410                 force_submit |= (first_index != last_index + 1);
1411
1412 repeat:
1413                 page = pickup_page_for_submission(grp, i, pagepool,
1414                                                   MNGD_MAPPING(sbi), gfp);
1415                 if (!page) {
1416                         force_submit = true;
1417                         ++bypass;
1418                         goto skippage;
1419                 }
1420
1421                 if (bio && force_submit) {
1422 submit_bio_retry:
1423                         __submit_bio(bio, REQ_OP_READ, 0);
1424                         bio = NULL;
1425                 }
1426
1427                 if (!bio) {
1428                         bio = erofs_grab_bio(sb, first_index + i,
1429                                              BIO_MAX_PAGES,
1430                                              z_erofs_vle_read_endio, true);
1431                         bio->bi_private = bi_private;
1432
1433                         ++nr_bios;
1434                 }
1435
1436                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1437                 if (err < PAGE_SIZE)
1438                         goto submit_bio_retry;
1439
1440                 force_submit = false;
1441                 last_index = first_index + i;
1442 skippage:
1443                 if (++i < clusterpages)
1444                         goto repeat;
1445
1446                 if (bypass < clusterpages)
1447                         qtail[JQ_SUBMIT] = &grp->next;
1448                 else
1449                         move_to_bypass_jobqueue(grp, qtail, owned_head);
1450         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1451
1452         if (bio)
1453                 __submit_bio(bio, REQ_OP_READ, 0);
1454
1455         if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
1456                 return true;
1457
1458         z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
1459         return true;
1460 }
1461
1462 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1463                                      struct list_head *pagepool,
1464                                      bool force_fg)
1465 {
1466         struct super_block *sb = f->inode->i_sb;
1467         struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
1468
1469         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1470                 return;
1471
1472 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1473         z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
1474 #endif
1475         if (!force_fg)
1476                 return;
1477
1478         /* wait until all bios are completed */
1479         wait_event(io[JQ_SUBMIT].u.wait,
1480                    !atomic_read(&io[JQ_SUBMIT].pending_bios));
1481
1482         /* let's do synchronous decompression */
1483         z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
1484 }
1485
1486 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1487                                              struct page *page)
1488 {
1489         struct inode *const inode = page->mapping->host;
1490         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1491         int err;
1492         LIST_HEAD(pagepool);
1493
1494         trace_erofs_readpage(page, false);
1495
1496         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1497
1498         err = z_erofs_do_read_page(&f, page, &pagepool);
1499         (void)z_erofs_vle_work_iter_end(&f.builder);
1500
1501         if (err) {
1502                 errln("%s, failed to read, err [%d]", __func__, err);
1503                 goto out;
1504         }
1505
1506         z_erofs_submit_and_unzip(&f, &pagepool, true);
1507 out:
1508         if (f.map.mpage)
1509                 put_page(f.map.mpage);
1510
1511         /* clean up the remaining free pages */
1512         put_pages_list(&pagepool);
1513         return 0;
1514 }
1515
1516 static int z_erofs_vle_normalaccess_readpages(struct file *filp,
1517                                               struct address_space *mapping,
1518                                               struct list_head *pages,
1519                                               unsigned int nr_pages)
1520 {
1521         struct inode *const inode = mapping->host;
1522         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1523
1524         bool sync = __should_decompress_synchronously(sbi, nr_pages);
1525         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1526         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1527         struct page *head = NULL;
1528         LIST_HEAD(pagepool);
1529
1530         trace_erofs_readpages(mapping->host, lru_to_page(pages),
1531                               nr_pages, false);
1532
1533         f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
1534
1535         for (; nr_pages; --nr_pages) {
1536                 struct page *page = lru_to_page(pages);
1537
1538                 prefetchw(&page->flags);
1539                 list_del(&page->lru);
1540
1541                 /*
1542                  * A pure asynchronous readahead is indicated if the
1543                  * first page encountered is marked with PG_readahead.
1544                  * Do asynchronous decompression for that case as well.
1545                  */
1546                 sync &= !(PageReadahead(page) && !head);
1547
1548                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1549                         list_add(&page->lru, &pagepool);
1550                         continue;
1551                 }
1552
1553                 set_page_private(page, (unsigned long)head);
1554                 head = page;
1555         }
1556
1557         while (head) {
1558                 struct page *page = head;
1559                 int err;
1560
1561                 /* traversal in reverse order */
1562                 head = (void *)page_private(page);
1563
1564                 err = z_erofs_do_read_page(&f, page, &pagepool);
1565                 if (err) {
1566                         struct erofs_vnode *vi = EROFS_V(inode);
1567
1568                         errln("%s, readahead error at page %lu of nid %llu",
1569                                 __func__, page->index, vi->nid);
1570                 }
1571
1572                 put_page(page);
1573         }
1574
1575         (void)z_erofs_vle_work_iter_end(&f.builder);
1576
1577         z_erofs_submit_and_unzip(&f, &pagepool, sync);
1578
1579         if (f.map.mpage)
1580                 put_page(f.map.mpage);
1581
1582         /* clean up the remaining free pages */
1583         put_pages_list(&pagepool);
1584         return 0;
1585 }
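/*
 * Illustrative sketch: the set_page_private()/page_private() chaining above
 * threads the batch into a LIFO stack through struct page itself, which is
 * why the second loop visits the pages in reverse order.  Helper names are
 * hypothetical.
 */
static void demo_page_push(struct page **head, struct page *page)
{
        /* remember the previous head in page->private, become the new head */
        set_page_private(page, (unsigned long)*head);
        *head = page;
}

static struct page *demo_page_pop(struct page **head)
{
        struct page *page = *head;

        if (page)
                *head = (struct page *)page_private(page);
        return page;
}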
1586
1587 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1588         .readpage = z_erofs_vle_normalaccess_readpage,
1589         .readpages = z_erofs_vle_normalaccess_readpages,
1590 };
1591
1592 /*
1593  * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
1594  * ---
1595  * VLE compression mode packs a variable amount of logical (uncompressed)
1596  * data into each fixed-size physical cluster.
1597  * Its on-disk extent records use "struct z_erofs_vle_decompressed_index".
1598  */
1599 #define __vle_cluster_advise(x, bit, bits) \
1600         ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
1601
1602 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
1603         Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
1604
1605 #define vle_cluster_type(di)    \
1606         __vle_cluster_type((di)->di_advise)
1607
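/*
 * Worked example of the macros above, assuming (as in erofs_fs.h) that the
 * 2-bit cluster type starts at bit 0 of di_advise and the enum is ordered
 * PLAIN = 0, HEAD = 1, NONHEAD = 2: for di_advise == cpu_to_le16(0x0002),
 *   __vle_cluster_advise(di_advise, 0, 2) == (0x0002 >> 0) & 0x3 == 2,
 * so vle_cluster_type(di) reports Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD.
 */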
1608 static int
1609 vle_decompressed_index_clusterofs(unsigned int *clusterofs,
1610                                   unsigned int clustersize,
1611                                   struct z_erofs_vle_decompressed_index *di)
1612 {
1613         switch (vle_cluster_type(di)) {
1614         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1615                 *clusterofs = clustersize;
1616                 break;
1617         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1618         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1619                 *clusterofs = le16_to_cpu(di->di_clusterofs);
1620                 break;
1621         default:
1622                 DBG_BUGON(1);
1623                 return -EIO;
1624         }
1625         return 0;
1626 }
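/*
 * Worked example (hypothetical numbers): a NONHEAD cluster always reports a
 * full clustersize, since the logical extent covering it continues past the
 * cluster, while PLAIN/HEAD clusters report the in-cluster byte offset at
 * which a new logical extent begins.  With 4 KiB clusters, a HEAD cluster
 * whose extent starts 1 KiB in carries di_clusterofs == 1024, whereas every
 * NONHEAD cluster yields *clusterofs == 4096.
 */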
1627
1628 static inline erofs_blk_t
1629 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1630 {
1631         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1632         struct erofs_vnode *vi = EROFS_V(inode);
1633
1634         unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1635                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1636                 index * sizeof(struct z_erofs_vle_decompressed_index);
1637
1638         return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1639 }
1640
1641 static inline unsigned int
1642 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1643 {
1644         struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1645         struct erofs_vnode *vi = EROFS_V(inode);
1646
1647         unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1648                 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1649                 index * sizeof(struct z_erofs_vle_decompressed_index);
1650
1651         return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
1652 }
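/*
 * Worked example of the two helpers above (all sizes assumed purely for
 * illustration): with iloc(sbi, nid) == 40960, an aligned inode-plus-xattr
 * size of 64 bytes, a 16-byte extent header and 8-byte index entries,
 * index 600 lives at ofs = 64 + 16 + 600 * 8 = 4880, i.e. byte address
 * 40960 + 4880 = 45840.  With 4 KiB blocks that resolves to block 11
 * (vle_extent_blkaddr) at in-block offset 784 (vle_extent_blkoff).
 */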
1653
1654 struct vle_map_blocks_iter_ctx {
1655         struct inode *inode;
1656         struct super_block *sb;
1657         unsigned int clusterbits;
1658
1659         struct page **mpage_ret;
1660         void **kaddr_ret;
1661 };
1662
1663 static int
1664 vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
1665                             unsigned int lcn,   /* logical cluster number */
1666                             unsigned long long *ofs,
1667                             erofs_blk_t *pblk,
1668                             unsigned int *flags)
1669 {
1670         const unsigned int clustersize = 1 << ctx->clusterbits;
1671         const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
1672         struct page *mpage = *ctx->mpage_ret;   /* extent metapage */
1673
1674         struct z_erofs_vle_decompressed_index *di;
1675         unsigned int cluster_type, delta0;
1676
1677         if (mpage->index != mblk) {
1678                 kunmap_atomic(*ctx->kaddr_ret);
1679                 unlock_page(mpage);
1680                 put_page(mpage);
1681
1682                 mpage = erofs_get_meta_page(ctx->sb, mblk, false);
1683                 if (IS_ERR(mpage)) {
1684                         *ctx->mpage_ret = NULL;
1685                         return PTR_ERR(mpage);
1686                 }
1687                 *ctx->mpage_ret = mpage;
1688                 *ctx->kaddr_ret = kmap_atomic(mpage);
1689         }
1690
1691         di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);
1692
1693         cluster_type = vle_cluster_type(di);
1694         switch (cluster_type) {
1695         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1696                 delta0 = le16_to_cpu(di->di_u.delta[0]);
1697                 if (unlikely(!delta0 || delta0 > lcn)) {
1698                         errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
1699                               delta0, lcn, EROFS_V(ctx->inode)->nid);
1700                         DBG_BUGON(1);
1701                         return -EIO;
1702                 }
1703                 return vle_get_logical_extent_head(ctx,
1704                         lcn - delta0, ofs, pblk, flags);
1705         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1706                 *flags ^= EROFS_MAP_ZIPPED;
1707                 /* fallthrough */
1708         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1709                 /* clustersize should be a power of two */
1710                 *ofs = ((u64)lcn << ctx->clusterbits) +
1711                         (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
1712                 *pblk = le32_to_cpu(di->di_u.blkaddr);
1713                 break;
1714         default:
1715                 errln("unknown cluster type %u at lcn %u of nid %llu",
1716                       cluster_type, lcn, EROFS_V(ctx->inode)->nid);
1717                 DBG_BUGON(1);
1718                 return -EIO;
1719         }
1720         return 0;
1721 }
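/*
 * Worked example of the NONHEAD case above (hypothetical numbers): a NONHEAD
 * record at lcn 7 with di_u.delta[0] == 3 redirects the lookup to lcn 4.
 * If that record is a HEAD with di_clusterofs == 512 and clusterbits == 12,
 * the extent head resolves to *ofs = (4 << 12) + 512 = 16896 and *pblk =
 * that record's di_u.blkaddr; a PLAIN head would additionally toggle
 * EROFS_MAP_ZIPPED off, marking the cluster as stored uncompressed.
 */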
1722
1723 int z_erofs_map_blocks_iter(struct inode *inode,
1724         struct erofs_map_blocks *map,
1725         int flags)
1726 {
1727         void *kaddr;
1728         const struct vle_map_blocks_iter_ctx ctx = {
1729                 .inode = inode,
1730                 .sb = inode->i_sb,
1731                 .clusterbits = EROFS_I_SB(inode)->clusterbits,
1732                 .mpage_ret = &map->mpage,
1733                 .kaddr_ret = &kaddr
1734         };
1735         const unsigned int clustersize = 1 << ctx.clusterbits;
1736         /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
1737         const bool initial = !map->m_llen;
1738
1739         /* logical extent (start, end) offset */
1740         unsigned long long ofs, end;
1741         unsigned int lcn;
1742         u32 ofs_rem;
1743
1744         /* initialize `pblk' to keep gcc from printing foolish warnings */
1745         erofs_blk_t mblk, pblk = 0;
1746         struct page *mpage = map->mpage;
1747         struct z_erofs_vle_decompressed_index *di;
1748         unsigned int cluster_type, logical_cluster_ofs;
1749         int err = 0;
1750
1751         trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
1752
1753         /* when trying to read beyond EOF, leave it unmapped */
1754         if (unlikely(map->m_la >= inode->i_size)) {
1755                 DBG_BUGON(!initial);
1756                 map->m_llen = map->m_la + 1 - inode->i_size;
1757                 map->m_la = inode->i_size;
1758                 map->m_flags = 0;
1759                 goto out;
1760         }
1761
1762         debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1763                 map->m_la, map->m_llen);
1764
1765         ofs = map->m_la + map->m_llen;
1766
1767         /* clustersize should be a power of two */
1768         lcn = ofs >> ctx.clusterbits;
1769         ofs_rem = ofs & (clustersize - 1);
1770
1771         mblk = vle_extent_blkaddr(inode, lcn);
1772
1773         if (!mpage || mpage->index != mblk) {
1774                 if (mpage)
1775                         put_page(mpage);
1776
1777                 mpage = erofs_get_meta_page(ctx.sb, mblk, false);
1778                 if (IS_ERR(mpage)) {
1779                         err = PTR_ERR(mpage);
1780                         goto out;
1781                 }
1782                 map->mpage = mpage;
1783         } else {
1784                 lock_page(mpage);
1785                 DBG_BUGON(!PageUptodate(mpage));
1786         }
1787
1788         kaddr = kmap_atomic(mpage);
1789         di = kaddr + vle_extent_blkoff(inode, lcn);
1790
1791         debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
1792                 mblk, vle_extent_blkoff(inode, lcn));
1793
1794         err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
1795                                                 clustersize, di);
1796         if (unlikely(err))
1797                 goto unmap_out;
1798
1799         if (!initial) {
1800                 /* [walking mode] 'map' has been already initialized */
1801                 map->m_llen += logical_cluster_ofs;
1802                 goto unmap_out;
1803         }
1804
1805         /* by default, compressed */
1806         map->m_flags |= EROFS_MAP_ZIPPED;
1807
1808         end = ((u64)lcn + 1) * clustersize;
1809
1810         cluster_type = vle_cluster_type(di);
1811
1812         switch (cluster_type) {
1813         case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1814                 if (ofs_rem >= logical_cluster_ofs)
1815                         map->m_flags ^= EROFS_MAP_ZIPPED;
1816                 /* fallthrough */
1817         case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1818                 if (ofs_rem == logical_cluster_ofs) {
1819                         pblk = le32_to_cpu(di->di_u.blkaddr);
1820                         goto exact_hitted;
1821                 }
1822
1823                 if (ofs_rem > logical_cluster_ofs) {
1824                         ofs = (u64)lcn * clustersize | logical_cluster_ofs;
1825                         pblk = le32_to_cpu(di->di_u.blkaddr);
1826                         break;
1827                 }
1828
1829                 /* logical cluster number should be >= 1 */
1830                 if (unlikely(!lcn)) {
1831                         errln("invalid logical cluster 0 at nid %llu",
1832                                 EROFS_V(inode)->nid);
1833                         err = -EIO;
1834                         goto unmap_out;
1835                 }
1836                 end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
1837                 /* fallthrough */
1838         case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1839                 /* get the corresponding first chunk */
1840                 err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
1841                                                   &pblk, &map->m_flags);
1842                 mpage = map->mpage;
1843
1844                 if (unlikely(err)) {
1845                         if (mpage)
1846                                 goto unmap_out;
1847                         goto out;
1848                 }
1849                 break;
1850         default:
1851                 errln("unknown cluster type %u at offset %llu of nid %llu",
1852                         cluster_type, ofs, EROFS_V(inode)->nid);
1853                 err = -EIO;
1854                 goto unmap_out;
1855         }
1856
1857         map->m_la = ofs;
1858 exact_hitted:
1859         map->m_llen = end - ofs;
1860         map->m_plen = clustersize;
1861         map->m_pa = blknr_to_addr(pblk);
1862         map->m_flags |= EROFS_MAP_MAPPED;
1863 unmap_out:
1864         kunmap_atomic(kaddr);
1865         unlock_page(mpage);
1866 out:
1867         debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1868                 __func__, map->m_la, map->m_pa,
1869                 map->m_llen, map->m_plen, map->m_flags);
1870
1871         trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
1872
1873         /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
1874         DBG_BUGON(err < 0 && err != -ENOMEM);
1875         return err;
1876 }
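/*
 * Worked example of the initial (m_llen == 0) pass above (hypothetical
 * numbers): with clusterbits == 12 (4 KiB clusters) and m_la == 10000,
 * lcn = 10000 >> 12 = 2 and ofs_rem = 10000 & 4095 = 1808.  If that cluster
 * is a HEAD record with logical_cluster_ofs == 1024, then ofs_rem >
 * logical_cluster_ofs, so the extent starts at ofs = 2 * 4096 + 1024 = 9216
 * and ends at end = 3 * 4096 = 12288, yielding m_la = 9216, m_llen = 3072,
 * m_plen = 4096 (one full cluster) and m_pa = blknr_to_addr(di_u.blkaddr).
 * A subsequent walking pass (m_llen != 0) merely extends m_llen by the next
 * cluster's logical_cluster_ofs.
 */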
1877