// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"
#include "compress.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 */
#define PAGE_UNALLOCATED	((void *)0x5F0E4B1D)

/* how to allocate cached pages for a workgroup */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 1 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)
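
/*
 * Illustrative sketch only (not used on the I/O path): the helper below is
 * hypothetical and merely documents how the fold above pairs with the
 * unfold performed later in pickup_page_for_submission().
 */
static inline bool __maybe_unused compressed_page_justfound(void *ptr)
{
	compressed_page_t t = tagptr_init(compressed_page_t, ptr);

	/* the tag bit set by tag_compressed_page_justfound() */
	return tagptr_unfold_tags(t);
}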

static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(z_erofs_workgroup_cachep);
}

static inline int init_unzip_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * we don't need too many threads; limiting the thread
	 * count could improve scheduling performance.
	 */
	z_erofs_workqueue =
		alloc_workqueue("erofs_unzipd",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				onlinecpus + onlinecpus / 4);

	return z_erofs_workqueue ? 0 : -ENOMEM;
}

static void init_once(void *ptr)
{
	struct z_erofs_vle_workgroup *grp = ptr;
	struct z_erofs_vle_work *const work =
		z_erofs_vle_grab_primary_work(grp);
	unsigned int i;

	mutex_init(&work->lock);
	work->nr_pages = 0;
	work->vcnt = 0;
	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
		grp->compressed_pages[i] = NULL;
}

static void init_always(struct z_erofs_vle_workgroup *grp)
{
	struct z_erofs_vle_work *const work =
		z_erofs_vle_grab_primary_work(grp);

	atomic_set(&grp->obj.refcount, 1);
	grp->flags = 0;

	DBG_BUGON(work->nr_pages);
	DBG_BUGON(work->vcnt);
}

int __init z_erofs_init_zip_subsystem(void)
{
	z_erofs_workgroup_cachep =
		kmem_cache_create("erofs_compress",
				  Z_EROFS_WORKGROUP_SIZE, 0,
				  SLAB_RECLAIM_ACCOUNT, init_once);

	if (z_erofs_workgroup_cachep) {
		if (!init_unzip_workqueue())
			return 0;

		kmem_cache_destroy(z_erofs_workgroup_cachep);
	}
	return -ENOMEM;
}

enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,
	/*
	 * The current work was the tail of an existing chain, and the previous
	 * processed chained works are all decided to be hooked up to it.
	 * A new chain should be created for the remaining unprocessed works,
	 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
	 * the next work cannot reuse the whole page in the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |  (belongs to the next work)  |  (belongs to the current work)  |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
	/*
	 * The current work has been linked with the processed chained works,
	 * and could also be linked with the potential remaining works, which
	 * means if the processing page is the tail partial page of the work,
	 * the current work can safely use the whole page (since the next work
	 * is under control) for in-place decompression, as illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page  |          head (partial) page           |
	 * | (of the current work) |        (of the previous work)          |
	 * |  PRIMARY_FOLLOWED or  |                                        |
	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used for the current work itself.  ]
	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
	Z_EROFS_VLE_WORK_MAX
};
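
/*
 * Quick reference (a summary of try_to_claim_workgroup() below; the
 * function itself is authoritative):
 *
 *	grp->next == NIL  -> claim it: PRIMARY_FOLLOWED, we own the chain
 *	grp->next == TAIL -> hook onto the open chain: PRIMARY_HOOKED
 *	otherwise         -> PRIMARY, the workgroup is owned elsewhere
 */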

struct z_erofs_vle_work_builder {
	enum z_erofs_vle_work_role role;
	/*
	 * 'hosted = false' means that the current workgroup doesn't belong to
	 * the owned chained workgroups. In other words, it is none of our
	 * business to submit this workgroup.
	 */
	bool hosted;

	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;
	struct z_erofs_pagevec_ctor vector;

	/* pages used for reading the compressed data */
	struct page **compressed_pages;
	unsigned int compressed_deficit;
};

#define VLE_WORK_BUILDER_INIT()	\
	{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
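
/*
 * Typical builder lifecycle, as driven by z_erofs_do_read_page() below
 * (an informal sketch, not an additional API contract):
 *
 *	struct z_erofs_vle_work_builder b = VLE_WORK_BUILDER_INIT();
 *
 *	z_erofs_vle_work_iter_begin(&b, sb, map, &owned_head);
 *	z_erofs_vle_work_add_page(&b, page, page_type);	(repeatedly)
 *	z_erofs_vle_work_iter_end(&b);
 */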

#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
				     struct address_space *mc,
				     pgoff_t index,
				     unsigned int clusterpages,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool,
				     gfp_t gfp)
{
	struct page **const pages = bl->compressed_pages;
	const unsigned int remaining = bl->compressed_deficit;
	bool standalone = true;
	unsigned int i, j = 0;

	if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
		return;

	gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;

	index += clusterpages - remaining;

	for (i = 0; i < remaining; ++i) {
		struct page *page;
		compressed_page_t t;

		/* the compressed page was loaded before */
		if (READ_ONCE(pages[i]))
			continue;

		page = find_get_page(mc, index + i);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else if (type == DELAYEDALLOC) {
			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
		} else {	/* DONTALLOC */
			if (standalone)
				j = i;
			standalone = false;
			continue;
		}

		if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
	}
	bl->compressed_pages += j;
	bl->compressed_deficit = remaining - j;

	if (standalone)
		bl->role = Z_EROFS_VLE_WORK_PRIMARY;
}
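
/*
 * Note: the PAGE_UNALLOCATED placeholder installed above for DELAYEDALLOC
 * is resolved at submission time in pickup_page_for_submission(), which
 * replaces it with a newly allocated, cache-managed page.
 */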

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp)
{
	struct z_erofs_vle_workgroup *const grp =
		container_of(egrp, struct z_erofs_vle_workgroup, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	const int clusterpages = erofs_clusterpages(sbi);
	int i;

	/*
	 * refcount of workgroup is now frozen as 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = grp->compressed_pages[i];

		if (!page || page->mapping != mapping)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(grp->compressed_pages[i], NULL);

		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
	int ret = 0;	/* 0 - busy */

	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (grp->compressed_pages[i] == page) {
				WRITE_ONCE(grp->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&grp->obj, 1);

		if (ret) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
	return ret;
}
#else
static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
				     struct address_space *mc,
				     pgoff_t index,
				     unsigned int clusterpages,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool,
				     gfp_t gfp)
{
	/* nowhere to load compressed pages from */
}
#endif

/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool try_to_reuse_as_compressed_page(
	struct z_erofs_vle_work_builder *b,
	struct page *page)
{
	while (b->compressed_deficit) {
		--b->compressed_deficit;
		if (!cmpxchg(b->compressed_pages++, NULL, page))
			return true;
	}

	return false;
}

/* callers must hold work->lock */
static int z_erofs_vle_work_add_page(
	struct z_erofs_vle_work_builder *builder,
	struct page *page,
	enum z_erofs_page_type type)
{
	int ret;
	bool occupied;

	/* give priority for the compressed data storage */
	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    try_to_reuse_as_compressed_page(builder, page))
		return 0;

	ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
					   page, type, &occupied);
	builder->work->vcnt += (unsigned int)ret;

	return ret ? 0 : -EAGAIN;
}

static enum z_erofs_vle_work_role
try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
		       z_erofs_vle_owned_workgrp_t *owned_head,
		       bool *hosted)
{
	DBG_BUGON(*hosted);

	/* let's claim these following types of workgroup */
retry:
	if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
		/* type 1, nil workgroup */
		if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
			    *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
			goto retry;

		*owned_head = &grp->next;
		*hosted = true;
		/* lucky, I am the followee :) */
		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;

	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
			    *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
			goto retry;
		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
	}

	return Z_EROFS_VLE_WORK_PRIMARY;	/* :( better luck next time */
}

struct z_erofs_vle_work_finder {
	struct super_block *sb;
	pgoff_t idx;
	unsigned int pageofs;

	struct z_erofs_vle_workgroup **grp_ret;
	enum z_erofs_vle_work_role *role;
	z_erofs_vle_owned_workgrp_t *owned_head;
	bool *hosted;
};

static struct z_erofs_vle_work *
z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
{
	bool tag, primary;
	struct erofs_workgroup *egrp;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
	if (!egrp) {
		*f->grp_ret = NULL;
		return NULL;
	}

	grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
	*f->grp_ret = grp;

	work = z_erofs_vle_grab_work(grp, f->pageofs);
	/* if multiref is disabled, `primary' is always true */
	primary = true;

	DBG_BUGON(work->pageofs != f->pageofs);

	/*
	 * lock must be taken first to avoid grp->next == NIL between
	 * claiming workgroup and adding pages:
	 *
	 * [incorrect locking case]:
	 *   claim the workgroup first
	 *   ...
	 *   mutex_lock(&work->lock)
	 *   add all pages to pagevec
	 *
	 * [correct locking case 1]:
	 *   mutex_lock(grp->work[a])
	 *   ...
	 *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
	 *   ...                          *role = SECONDARY
	 *                                add all pages to pagevec
	 *   ...
	 *   mutex_unlock(grp->work[c])
	 *   mutex_lock(grp->work[c])
	 *   ...
	 *
	 * [correct locking case 2]:
	 *   mutex_lock(grp->work[b])
	 *   ...
	 *   mutex_lock(grp->work[a])
	 *   ...
	 *   mutex_lock(grp->work[c])
	 *   ...
	 *   mutex_lock(grp->work[a])
	 *   *role = PRIMARY_OWNER
	 *   add all pages to pagevec
	 *   ...
	 */
	mutex_lock(&work->lock);

	if (!primary)
		*f->role = Z_EROFS_VLE_WORK_SECONDARY;
	else	/* claim the workgroup if possible */
		*f->role = try_to_claim_workgroup(grp, f->owned_head,
						  f->hosted);
	return work;
}

static struct z_erofs_vle_work *
z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
			  struct erofs_map_blocks *map)
{
	struct z_erofs_vle_workgroup *grp = *f->grp_ret;
	struct z_erofs_vle_work *work;
	int err;

	/* if multiref is disabled, grp should never be nullptr */
	if (unlikely(grp)) {
		DBG_BUGON(1);
		return ERR_PTR(-EINVAL);
	}

	/* no available workgroup, let's allocate one */
	grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
	if (unlikely(!grp))
		return ERR_PTR(-ENOMEM);

	init_always(grp);
	grp->obj.index = f->idx;
	grp->llen = map->m_llen;

	z_erofs_vle_set_workgrp_fmt(grp, (map->m_flags & EROFS_MAP_ZIPPED) ?
				    Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
				    Z_EROFS_VLE_WORKGRP_FMT_PLAIN);

	if (map->m_flags & EROFS_MAP_FULL_MAPPED)
		grp->flags |= Z_EROFS_VLE_WORKGRP_FULL_LENGTH;

	/* new workgrps have been claimed as type 1 */
	WRITE_ONCE(grp->next, *f->owned_head);
	/* primary and followed work for all new workgrps */
	*f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	/* it should be submitted by ourselves */
	*f->hosted = true;

	work = z_erofs_vle_grab_primary_work(grp);
	work->pageofs = f->pageofs;

	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new workgroup.
	 */
	mutex_trylock(&work->lock);

	err = erofs_register_workgroup(f->sb, &grp->obj, 0);
	if (err) {
		mutex_unlock(&work->lock);
		kmem_cache_free(z_erofs_workgroup_cachep, grp);
		return ERR_PTR(-EAGAIN);
	}

	*f->owned_head = &grp->next;
	*f->grp_ret = grp;
	return work;
}

#define builder_is_hooked(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)

#define builder_is_followed(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
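
/*
 * Both helpers rely on the numeric ordering of enum z_erofs_vle_work_role:
 * SECONDARY < PRIMARY < PRIMARY_HOOKED < PRIMARY_FOLLOWED, so a single
 * ">=" comparison answers "is the work at least hooked/followed?".
 */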

static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
				       struct super_block *sb,
				       struct erofs_map_blocks *map,
				       z_erofs_vle_owned_workgrp_t *owned_head)
{
	const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
	struct z_erofs_vle_workgroup *grp;
	const struct z_erofs_vle_work_finder finder = {
		.sb = sb,
		.idx = erofs_blknr(map->m_pa),
		.pageofs = map->m_la & ~PAGE_MASK,
		.grp_ret = &grp,
		.role = &builder->role,
		.owned_head = owned_head,
		.hosted = &builder->hosted
	};
	struct z_erofs_vle_work *work;

	DBG_BUGON(builder->work);

	/* must be Z_EROFS_WORK_TAIL or the next chained work */
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	DBG_BUGON(erofs_blkoff(map->m_pa));

repeat:
	work = z_erofs_vle_work_lookup(&finder);
	if (work) {
		unsigned int orig_llen;

		/* increase workgroup `llen' if needed */
		while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
		       orig_llen != cmpxchg_relaxed(&grp->llen,
						    orig_llen, map->m_llen))
			cpu_relax();
		goto got_it;
	}

	work = z_erofs_vle_work_register(&finder, map);
	if (unlikely(work == ERR_PTR(-EAGAIN)))
		goto repeat;

	if (IS_ERR(work))
		return PTR_ERR(work);
got_it:
	z_erofs_pagevec_ctor_init(&builder->vector, Z_EROFS_NR_INLINE_PAGEVECS,
				  work->pagevec, work->vcnt);

	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
		/* enable possibly in-place decompression */
		builder->compressed_pages = grp->compressed_pages;
		builder->compressed_deficit = clusterpages;
	} else {
		builder->compressed_pages = NULL;
		builder->compressed_deficit = 0;
	}

	builder->grp = grp;
	builder->work = work;
	return 0;
}

/*
 * keep in mind that workgroups are freed only after an RCU grace
 * period, so rcu_read_lock() can prevent a workgroup from being freed.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_vle_work *work = container_of(head,
		struct z_erofs_vle_work, rcu);
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	kmem_cache_free(z_erofs_workgroup_cachep, grp);
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
		struct z_erofs_vle_workgroup, obj);
	struct z_erofs_vle_work *const work = &vgrp->work;

	call_rcu(&work->rcu, z_erofs_rcu_callback);
}
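
/*
 * Reader-side sketch (illustrative; see erofs_find_workgroup() in the
 * shared workgroup code for the real lookup):
 *
 *	rcu_read_lock();
 *	grp = <radix tree lookup>;
 *	... grab a reference before using grp any further ...
 *	rcu_read_unlock();
 *
 * This is safe precisely because freeing is deferred to
 * z_erofs_rcu_callback() after a grace period.
 */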

static void
__z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
			   struct z_erofs_vle_work *work __maybe_unused)
{
	erofs_workgroup_put(&grp->obj);
}

static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
{
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	__z_erofs_vle_work_release(grp, work);
}

static inline bool
z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
{
	struct z_erofs_vle_work *work = builder->work;

	if (!work)
		return false;

	z_erofs_pagevec_ctor_exit(&builder->vector, false);
	mutex_unlock(&work->lock);

	/*
	 * if all pending pages are added, don't hold the work reference
	 * any longer if the current work isn't hosted by ourselves.
	 */
	if (!builder->hosted)
		__z_erofs_vle_work_release(builder->grp, work);

	builder->work = NULL;
	return true;
}

static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp);

	if (unlikely(!page))
		return NULL;

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}
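
/*
 * Staging pages carry no address_space of their own; the special
 * Z_EROFS_MAPPING_STAGING value set above is what z_erofs_page_is_staging()
 * tests later, and such pages are recycled back into the page pool through
 * z_erofs_put_stagingpage() once decompression is done.
 */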

struct z_erofs_vle_frontend {
	struct inode *const inode;

	struct z_erofs_vle_work_builder builder;
	struct erofs_map_blocks map;

	z_erofs_vle_owned_workgrp_t owned_head;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
};

#define VLE_FRONTEND_INIT(__i) { \
	.inode = __i, \
	.map = { .mpage = NULL }, \
	.builder = VLE_WORK_BUILDER_INIT(), \
	.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
	.backmost = true, }

#ifdef EROFS_FS_HAS_MANAGED_CACHE
static inline bool
should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
{
	if (fe->backmost)
		return true;

	if (EROFS_FS_ZIP_CACHE_LVL >= 2)
		return la < fe->headoffset;

	return false;
}
#else
static inline bool
should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
{
	return false;
}
#endif

static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
				struct page *page,
				struct list_head *page_pool)
{
	struct super_block *const sb = fe->inode->i_sb;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	struct erofs_map_blocks *const map = &fe->map;
	struct z_erofs_vle_work_builder *const builder = &fe->builder;
	const loff_t offset = page_offset(page);

	bool tight = builder_is_hooked(builder);
	struct z_erofs_vle_work *work = builder->work;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, split, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	split = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid unzip work previously (very rare) */
		if (!builder->work)
			goto restart_now;
		goto hitted;
	}

	/* go ahead with the next map_blocks */
	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_vle_work_iter_end(builder))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = z_erofs_map_blocks_iter(fe->inode, map, 0);
	if (unlikely(err))
		goto err_out;

restart_now:
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
		goto hitted;

	DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
	DBG_BUGON(erofs_blkoff(map->m_pa));

	err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
	if (unlikely(err))
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, map->m_la))
		cache_strategy = DELAYEDALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(builder, MNGD_MAPPING(sbi),
				 map->m_pa / PAGE_SIZE,
				 map->m_plen / PAGE_SIZE,
				 cache_strategy, page_pool, GFP_KERNEL);

	tight &= builder_is_hooked(builder);
	work = builder->work;
hitted:
	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive the page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!split ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= builder_is_followed(builder);

retry:
	err = z_erofs_vle_work_add_page(builder, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(page_pool, GFP_NOFS);

		err = z_erofs_vle_work_add_page(builder, newpage,
						Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (likely(!err))
			goto retry;
	}

	if (unlikely(err))
		goto err_out;

	index = page->index - map->m_la / PAGE_SIZE;

	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of split parts of a page */
	++split;
	/* also update nr_pages */
	work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_endio(page);

	debugln("%s, finish page: %pK split: %u map->m_llen %llu",
		__func__, page, split, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}

static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (!background) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}
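
/*
 * Note: 'ptr' is the tagged pointer produced by jobqueueset_init() below;
 * tag 1 selects the background (workqueue) path and tag 0 the foreground
 * (waitqueue) path, so a single bi_private serves both completion models.
 */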

static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	struct erofs_sb_info *sbi = NULL;
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

		if (unlikely(!sbi && !z_erofs_page_is_staging(page))) {
			sbi = EROFS_SB(page->mapping->host->i_sb);

			if (time_to_inject(sbi, FAULT_READ_IO)) {
				erofs_show_injection_info(FAULT_READ_IO);
				err = BLK_STS_IOERR;
			}
		}

		/* sbi should already be set if the page is managed */
		if (sbi)
			cachemngd = erofs_page_is_managed(sbi, page);

		if (err)
			SetPageError(page);
		else if (cachemngd)
			SetPageUptodate(page);

		if (cachemngd)
			unlock_page(page);
	}

	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
	bio_put(bio);
}

static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
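
/*
 * z_erofs_vle_unzip() below picks its pages[] buffer in three tiers:
 * a small on-stack array first, then this shared global map (taken via
 * trylock on the mutex above), and finally kvmalloc_array(), with the
 * global map as the last resort under memory pressure.
 */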

static int z_erofs_vle_unzip(struct super_block *sb,
			     struct z_erofs_vle_workgroup *grp,
			     struct list_head *page_pool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_pagevec_ctor ctor;
	unsigned int nr_pages;
	unsigned int sparsemem_pages = 0;
	struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;
	unsigned int algorithm;
	unsigned int i, outputsize;

	enum z_erofs_page_type page_type;
	bool overlapped, partial;
	struct z_erofs_vle_work *work;
	int err = 0;

	might_sleep();
	work = z_erofs_vle_grab_primary_work(grp);
	DBG_BUGON(!READ_ONCE(work->nr_pages));

	mutex_lock(&work->lock);
	nr_pages = work->nr_pages;

	if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
		pages = pages_onstack;
	else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
		 mutex_trylock(&z_pagemap_global_lock))
		pages = z_pagemap_global;
	else {
repeat:
		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
				       GFP_KERNEL);

		/* fallback to global pagemap for the lowmem scenario */
		if (unlikely(!pages)) {
			if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
				goto repeat;
			else {
				mutex_lock(&z_pagemap_global_lock);
				pages = z_pagemap_global;
			}
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
				  work->pagevec, 0);

	for (i = 0; i < work->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_put_stagingpage(page_pool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);
		DBG_BUGON(pages[pagenr]);

		pages[pagenr] = page;
	}
	sparsemem_pages = i;

	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = grp->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (!z_erofs_page_is_staging(page)) {
			if (erofs_page_is_managed(sbi, page)) {
				if (unlikely(!PageUptodate(page)))
					err = -EIO;
				continue;
			}

			/*
			 * only if non-head page can be selected
			 * for inplace decompression
			 */
			pagenr = z_erofs_onlinepage_index(page);

			DBG_BUGON(pagenr >= nr_pages);
			DBG_BUGON(pages[pagenr]);
			++sparsemem_pages;
			pages[pagenr] = page;

			overlapped = true;
		}

		/* PG_error needs checking for inplaced and staging pages */
		if (unlikely(PageError(page))) {
			DBG_BUGON(PageUptodate(page));
			err = -EIO;
		}
	}

	if (unlikely(err))
		goto out;

	if (nr_pages << PAGE_SHIFT >= work->pageofs + grp->llen) {
		outputsize = grp->llen;
		partial = !(grp->flags & Z_EROFS_VLE_WORKGRP_FULL_LENGTH);
	} else {
		outputsize = (nr_pages << PAGE_SHIFT) - work->pageofs;
		partial = true;
	}

	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN)
		algorithm = Z_EROFS_COMPRESSION_SHIFTED;
	else
		algorithm = Z_EROFS_COMPRESSION_LZ4;

	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
				 .sb = sb,
				 .in = compressed_pages,
				 .out = pages,
				 .pageofs_out = work->pageofs,
				 .inputsize = PAGE_SIZE,
				 .outputsize = outputsize,
				 .alg = algorithm,
				 .inplace_io = overlapped,
				 .partial_decoding = partial
				 }, page_pool);

out:
	/* must handle all compressed pages before ending pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

		if (erofs_page_is_managed(sbi, page))
			continue;

		/* recycle all individual staging pages */
		(void)z_erofs_put_stagingpage(page_pool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (!page)
			continue;

		DBG_BUGON(!page->mapping);

		/* recycle all individual staging pages */
		if (z_erofs_put_stagingpage(page_pool, page))
			continue;

		if (unlikely(err < 0))
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (unlikely(pages != pages_onstack))
		kvfree(pages);

	work->nr_pages = 0;
	work->vcnt = 0;

	/* all work locks MUST be taken before the following line */

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);

	/* all work locks SHOULD be released right now */
	mutex_unlock(&work->lock);

	z_erofs_vle_work_release(work);
	return err;
}

static void z_erofs_vle_unzip_all(struct super_block *sb,
				  struct z_erofs_vle_unzip_io *io,
				  struct list_head *page_pool)
{
	z_erofs_vle_owned_workgrp_t owned = io->head;

	while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
		struct z_erofs_vle_workgroup *grp;

		/* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);

		grp = container_of(owned, struct z_erofs_vle_workgroup, next);
		owned = READ_ONCE(grp->next);

		z_erofs_vle_unzip(sb, grp, page_pool);
	}
}

static void z_erofs_vle_unzip_wq(struct work_struct *work)
{
	struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
		struct z_erofs_vle_unzip_io_sb, io.u.work);
	LIST_HEAD(page_pool);

	DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);

	put_pages_list(&page_pool);
	kvfree(iosb);
}

static struct page *
pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
			   unsigned int nr,
			   struct list_head *pagepool,
			   struct address_space *mc,
			   gfp_t gfp)
{
	/* determined at compile time to avoid too many #ifdefs */
	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
	const pgoff_t index = grp->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(grp->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated and
	 * a placeholder is out there, prepare it now.
	 */
	if (!nocache && page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	mapping = READ_ONCE(page->mapping);

	/*
	 * if managed cache is disabled, there is no way to
	 * get such a cached-like page.
	 */
	if (nocache) {
		/* if managed cache is disabled, `justfound' is impossible */
		DBG_BUGON(justfound);

		/* and it should be locked, not uptodate, and not truncated */
		DBG_BUGON(!PageLocked(page));
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!mapping);
		goto out;
	}

	/*
	 * unmanaged (file) pages are all locked solidly,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(grp->compressed_pages[nr], page);

		ClearPageError(page);
		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) under
			 * the current restriction if the page is
			 * already in compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)grp);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = __stagingpage_alloc(pagepool, gfp);
	if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
		list_add(&page->lru, pagepool);
		cpu_relax();
		goto repeat;
	}
	if (nocache || !tocache)
		goto out;
	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		page->mapping = Z_EROFS_MAPPING_STAGING;
		goto out;
	}

	set_page_private(page, (unsigned long)grp);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */
	return page;
}
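
/*
 * Concurrency note on pickup_page_for_submission(): its cmpxchg() on
 * compressed_pages[nr] races with preload_compressed_pages() and other
 * submitters; on failure, the freshly allocated page goes back to the
 * pool and the lookup restarts from the 'repeat' label.
 */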

static struct z_erofs_vle_unzip_io *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_vle_unzip_io *io,
	      bool foreground)
{
	struct z_erofs_vle_unzip_io_sb *iosb;

	if (foreground) {
		/* waitqueue available for foreground io */
		DBG_BUGON(!io);

		init_waitqueue_head(&io->u.wait);
		atomic_set(&io->pending_bios, 0);
		goto out;
	}

	iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
	DBG_BUGON(!iosb);

	/* initialize fields in the allocated descriptor */
	io = &iosb->io;
	iosb->sb = sb;
	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
	io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
	return io;
}

/* define workgroup jobqueue types */
enum {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	JQ_BYPASS,
#endif
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      z_erofs_vle_owned_workgrp_t qtail[],
			      struct z_erofs_vle_unzip_io *q[],
			      struct z_erofs_vle_unzip_io *fgq,
			      bool forcefg)
{
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	/*
	 * if managed cache is enabled, a bypass jobqueue is needed:
	 * there is no need to read from the device for the workgroups
	 * in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
#endif

	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
				    z_erofs_vle_owned_workgrp_t qtail[],
				    z_erofs_vle_owned_workgrp_t owned_head)
{
	z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
		owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &grp->next);

	qtail[JQ_BYPASS] = &grp->next;
}

static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue a workqueue for decompression; drop it directly instead.
	 */
	if (force_fg || nr_bios)
		return false;

	kvfree(container_of(q[JQ_SUBMIT],
			    struct z_erofs_vle_unzip_io_sb,
			    io));
	return true;
}
#else
static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
				    z_erofs_vle_owned_workgrp_t qtail[],
				    z_erofs_vle_owned_workgrp_t owned_head)
{
	/* impossible to bypass submission if managed cache is disabled */
	DBG_BUGON(1);
}

static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/* bios should be >0 if managed cache is disabled */
	DBG_BUGON(!nr_bios);
	return false;
}
#endif

static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_vle_owned_workgrp_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_vle_unzip_io *fgq,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	const gfp_t gfp = GFP_NOFS;

	z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
	struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
	struct bio *bio;
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;

	if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
		return false;

	force_submit = false;
	bio = NULL;
	nr_bios = 0;
	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_vle_workgroup *grp;
		pgoff_t first_index;
		struct page *page;
		unsigned int i = 0, bypass = 0;
		int err;

		/* impossible that 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);

		grp = container_of(owned_head,
				   struct z_erofs_vle_workgroup, next);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
				     Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

		first_index = grp->obj.index;
		force_submit |= (first_index != last_index + 1);

repeat:
		page = pickup_page_for_submission(grp, i, pagepool,
						  MNGD_MAPPING(sbi), gfp);
		if (!page) {
			force_submit = true;
			++bypass;
			goto skippage;
		}

		if (bio && force_submit) {
submit_bio_retry:
			__submit_bio(bio, REQ_OP_READ, 0);
			bio = NULL;
		}

		if (!bio) {
			bio = erofs_grab_bio(sb, first_index + i,
					     BIO_MAX_PAGES, bi_private,
					     z_erofs_vle_read_endio, true);
			++nr_bios;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
skippage:
		if (++i < clusterpages)
			goto repeat;

		if (bypass < clusterpages)
			qtail[JQ_SUBMIT] = &grp->next;
		else
			move_to_bypass_jobqueue(grp, qtail, owned_head);
	} while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);

	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);

	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
		return true;

	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
	return true;
}

static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];

	if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
		return;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
#endif
	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[JQ_SUBMIT].u.wait,
		   !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* do the synchronous decompression */
	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
}

static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_vle_work_iter_end(&f.builder);

	if (err)
		errln("%s, failed to read, err [%d]", __func__, err);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_submit_and_unzip(&f, &pagepool, true);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static int z_erofs_vle_normalaccess_readpages(struct file *filp,
					      struct address_space *mapping,
					      struct list_head *pages,
					      unsigned int nr_pages)
{
	struct inode *const inode = mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	bool sync = __should_decompress_synchronously(sbi, nr_pages);
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	struct page *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(mapping->host, lru_to_page(pages),
			      nr_pages, false);

	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;

	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/*
		 * A pure asynchronous readahead is indicated if
		 * a PG_readahead marked page is hit first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err) {
			struct erofs_vnode *vi = EROFS_V(inode);

			errln("%s, readahead error at page %lu of nid %llu",
			      __func__, page->index, vi->nid);
		}

		put_page(page);
	}

	(void)z_erofs_vle_work_iter_end(&f.builder);

	z_erofs_submit_and_unzip(&f, &pagepool, sync);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}

const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};