// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 */
#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)

/* how to allocate cached pages for a workgroup */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)

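/*
 * Illustrative note (reader's sketch, not part of the original comments):
 * a page looked up in the managed cache is folded together with a 1-bit
 * `justfound' tag before it is published into compressed_pages[].  Consumers
 * later recover both halves, roughly as pickup_page_for_submission() does:
 *
 *	compressed_page_t t = tagptr_init(compressed_page_t, slot);
 *	int justfound = tagptr_unfold_tags(t);
 *	struct page *page = tagptr_unfold_ptr(t);
 */
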
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(z_erofs_workgroup_cachep);
}

static inline int init_unzip_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * we don't need too many threads, limiting threads
	 * could improve scheduling performance.
	 */
	z_erofs_workqueue =
		alloc_workqueue("erofs_unzipd",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				onlinecpus + onlinecpus / 4);

	return z_erofs_workqueue ? 0 : -ENOMEM;
}

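/*
 * Note for readers: init_once() below is a slab constructor, so it only runs
 * when a workgroup object is first constructed by the allocator, while
 * init_always() re-initializes the fields that must be reset on every
 * allocation, such as the reference count.
 */
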
static void init_once(void *ptr)
{
	struct z_erofs_vle_workgroup *grp = ptr;
	struct z_erofs_vle_work *const work =
		z_erofs_vle_grab_primary_work(grp);
	unsigned int i;

	mutex_init(&work->lock);
	work->nr_pages = 0;
	work->vcnt = 0;
	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
		grp->compressed_pages[i] = NULL;
}

static void init_always(struct z_erofs_vle_workgroup *grp)
{
	struct z_erofs_vle_work *const work =
		z_erofs_vle_grab_primary_work(grp);

	atomic_set(&grp->obj.refcount, 1);
	grp->flags = 0;

	DBG_BUGON(work->nr_pages);
	DBG_BUGON(work->vcnt);
}

int __init z_erofs_init_zip_subsystem(void)
{
	z_erofs_workgroup_cachep =
		kmem_cache_create("erofs_compress",
				  Z_EROFS_WORKGROUP_SIZE, 0,
				  SLAB_RECLAIM_ACCOUNT, init_once);

	if (z_erofs_workgroup_cachep) {
		if (!init_unzip_workqueue())
			return 0;

		kmem_cache_destroy(z_erofs_workgroup_cachep);
	}
	return -ENOMEM;
}

enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,
	/*
	 * The current work has at least been linked with the following
	 * processed chained works, which means if the processing page
	 * is the tail partial page of the work, the current work can
	 * safely use the whole page, as illustrated below:
	 * +--------------+-------------------------------------------+
	 * |  tail page   |      head page (of the previous work)     |
	 * +--------------+-------------------------------------------+
	 *   /\  which belongs to the current work
	 * [  (*) this page can be used for the current work itself.  ]
	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
	Z_EROFS_VLE_WORK_MAX
};

struct z_erofs_vle_work_builder {
	enum z_erofs_vle_work_role role;
	/*
	 * 'hosted = false' means that the current workgroup doesn't belong to
	 * the owned chained workgroups. In other words, it is none of our
	 * business to submit this workgroup.
	 */
	bool hosted;

	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;
	struct z_erofs_pagevec_ctor vector;

	/* pages used for reading the compressed data */
	struct page **compressed_pages;
	unsigned int compressed_deficit;
};

#define VLE_WORK_BUILDER_INIT()	\
	{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }

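/*
 * Sketch of the builder life cycle (drawn from the code below, not a spec):
 * z_erofs_vle_work_iter_begin() looks up or registers the work for the
 * current physical cluster, z_erofs_vle_work_add_page() then feeds it file
 * (or staging) pages one by one, and z_erofs_vle_work_iter_end() unlocks and
 * drops it before the frontend moves on to the next map_blocks extent.
 */
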
#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
				     struct address_space *mc,
				     pgoff_t index,
				     unsigned int clusterpages,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool,
				     gfp_t gfp)
{
	struct page **const pages = bl->compressed_pages;
	const unsigned int remaining = bl->compressed_deficit;
	bool standalone = true;
	unsigned int i, j = 0;

	if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
		return;

	gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;

	index += clusterpages - remaining;

	for (i = 0; i < remaining; ++i) {
		struct page *page;
		compressed_page_t t;

		/* the compressed page was loaded before */
		if (READ_ONCE(pages[i]))
			continue;

		page = find_get_page(mc, index + i);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else if (type == DELAYEDALLOC) {
			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
		} else {	/* DONTALLOC */
			if (standalone)
				j = i;
			standalone = false;
			continue;
		}

		if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
	}
	bl->compressed_pages += j;
	bl->compressed_deficit = remaining - j;

	if (standalone)
		bl->role = Z_EROFS_VLE_WORK_PRIMARY;
}

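/*
 * Note (interpretation, not from the original comments): `standalone' stays
 * true only if every remaining slot was satisfied by the managed cache or a
 * DELAYEDALLOC placeholder.  In that case no file page needs to be borrowed
 * for in-place decompression, so the role can be downgraded from
 * PRIMARY_FOLLOWED to plain PRIMARY.
 */
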
/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp)
{
	struct z_erofs_vle_workgroup *const grp =
		container_of(egrp, struct z_erofs_vle_workgroup, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	const int clusterpages = erofs_clusterpages(sbi);
	int i;

	/*
	 * refcount of workgroup is now frozen as 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = grp->compressed_pages[i];

		if (!page || page->mapping != mapping)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(grp->compressed_pages[i], NULL);

		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_vle_workgroup *grp;
	int ret = 0;	/* 0 - busy */

	/* prevent the workgroup from being freed */
	rcu_read_lock();
	grp = (void *)page_private(page);

	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (grp->compressed_pages[i] == page) {
				WRITE_ONCE(grp->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&grp->obj, 1);
	}
	rcu_read_unlock();

	if (ret) {
		ClearPagePrivate(page);
		put_page(page);
	}
	return ret;
}

#else
static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
				     struct address_space *mc,
				     pgoff_t index,
				     unsigned int clusterpages,
				     enum z_erofs_cache_alloctype type,
				     struct list_head *pagepool,
				     gfp_t gfp)
{
	/* nowhere to load compressed pages from */
}
#endif

282 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
283 static inline bool try_to_reuse_as_compressed_page(
284 struct z_erofs_vle_work_builder *b,
287 while (b->compressed_deficit) {
288 --b->compressed_deficit;
289 if (!cmpxchg(b->compressed_pages++, NULL, page))
296 /* callers must be with work->lock held */
297 static int z_erofs_vle_work_add_page(
298 struct z_erofs_vle_work_builder *builder,
300 enum z_erofs_page_type type)
305 /* give priority for the compressed data storage */
306 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
307 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
308 try_to_reuse_as_compressed_page(builder, page))
311 ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
312 page, type, &occupied);
313 builder->work->vcnt += (unsigned int)ret;
315 return ret ? 0 : -EAGAIN;
static inline bool try_to_claim_workgroup(
	struct z_erofs_vle_workgroup *grp,
	z_erofs_vle_owned_workgrp_t *owned_head,
	bool *hosted)
{
	DBG_BUGON(*hosted == true);

	/* let's claim these following types of workgroup */
retry:
	if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
		/* type 1, nil workgroup */
		if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
			    *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
			goto retry;

		*owned_head = &grp->next;
		*hosted = true;
	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
			    *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
			goto retry;

		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
	} else
		return false;	/* :( better luck next time */

	return true;	/* lucky, I am the followee :) */
}

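/*
 * Rough picture of an owned chain (reader's sketch, matching the constants
 * used above): each workgroup links to the next one through grp->next,
 *
 *	owned_head -> grp a -> grp b -> ... -> Z_EROFS_VLE_WORKGRP_TAIL
 *
 * NIL means the workgroup is not claimed by anyone, TAIL marks the open end
 * of a chain still being built, and TAIL_CLOSED marks a chain already handed
 * over for submission.
 */
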
struct z_erofs_vle_work_finder {
	struct super_block *sb;
	pgoff_t idx;
	unsigned int pageofs;

	struct z_erofs_vle_workgroup **grp_ret;
	enum z_erofs_vle_work_role *role;
	z_erofs_vle_owned_workgrp_t *owned_head;
	bool *hosted;
};

static struct z_erofs_vle_work *
z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
{
	bool tag, primary;
	struct erofs_workgroup *egrp;
	struct z_erofs_vle_workgroup *grp;
	struct z_erofs_vle_work *work;

	egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
	if (!egrp) {
		*f->grp_ret = NULL;
		return NULL;
	}

	grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
	*f->grp_ret = grp;

	work = z_erofs_vle_grab_work(grp, f->pageofs);
	/* if multiref is disabled, `primary' is always true */
	primary = true;

	DBG_BUGON(work->pageofs != f->pageofs);

	/*
	 * lock must be taken first to avoid grp->next == NIL between
	 * claiming workgroup and adding pages:
	 *
	 * [incorrect locking case]:
	 *	try_to_claim_workgroup()
	 *	...
	 *	mutex_lock(&work->lock)
	 *	add all pages to pagevec
	 *
	 * [correct locking case 1]:
	 *	mutex_lock(grp->work[a])
	 *	...
	 *	mutex_lock(grp->work[b])	mutex_lock(grp->work[c])
	 *	...				*role = SECONDARY
	 *					add all pages to pagevec
	 *	...
	 *	mutex_unlock(grp->work[c])
	 *	mutex_lock(grp->work[c])
	 *	...
	 *
	 * [correct locking case 2]:
	 *	mutex_lock(grp->work[b])
	 *	...
	 *	mutex_lock(grp->work[a])
	 *	...
	 *	mutex_lock(grp->work[c])
	 *	...
	 *	mutex_lock(grp->work[a])
	 *	*role = PRIMARY_OWNER
	 *	add all pages to pagevec
	 *	...
	 */
	mutex_lock(&work->lock);

	if (!primary)
		*f->role = Z_EROFS_VLE_WORK_SECONDARY;
	/* claim the workgroup if possible */
	else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
		*f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	else
		*f->role = Z_EROFS_VLE_WORK_PRIMARY;

	return work;
}

static struct z_erofs_vle_work *
z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
			  struct erofs_map_blocks *map)
{
	struct z_erofs_vle_workgroup *grp = *f->grp_ret;
	struct z_erofs_vle_work *work;
	int err;

	/* if multiref is disabled, grp should never be nullptr */
	if (unlikely(grp)) {
		DBG_BUGON(1);
		return ERR_PTR(-EINVAL);
	}

	/* no available workgroup, let's allocate one */
	grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
	if (unlikely(!grp))
		return ERR_PTR(-ENOMEM);

	init_always(grp);
	grp->obj.index = f->idx;
	grp->llen = map->m_llen;

	z_erofs_vle_set_workgrp_fmt(grp,
				    (map->m_flags & EROFS_MAP_ZIPPED) ?
					Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
					Z_EROFS_VLE_WORKGRP_FMT_PLAIN);

	/* new workgrps have been claimed as type 1 */
	WRITE_ONCE(grp->next, *f->owned_head);
	/* primary and followed work for all new workgrps */
	*f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
	/* it should be submitted by ourselves */
	*f->hosted = true;

	work = z_erofs_vle_grab_primary_work(grp);
	work->pageofs = f->pageofs;

	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new workgroup.
	 */
	mutex_trylock(&work->lock);

	err = erofs_register_workgroup(f->sb, &grp->obj, 0);
	if (err) {
		mutex_unlock(&work->lock);
		kmem_cache_free(z_erofs_workgroup_cachep, grp);
		return ERR_PTR(-EAGAIN);
	}

	*f->owned_head = &grp->next;
	*f->grp_ret = grp;
	return work;
}

#define builder_is_followed(builder) \
	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)

static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
				       struct super_block *sb,
				       struct erofs_map_blocks *map,
				       z_erofs_vle_owned_workgrp_t *owned_head)
{
	const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
	struct z_erofs_vle_workgroup *grp;
	const struct z_erofs_vle_work_finder finder = {
		.sb = sb,
		.idx = erofs_blknr(map->m_pa),
		.pageofs = map->m_la & ~PAGE_MASK,
		.grp_ret = &grp,
		.role = &builder->role,
		.owned_head = owned_head,
		.hosted = &builder->hosted
	};
	struct z_erofs_vle_work *work;

	DBG_BUGON(builder->work);

	/* must be Z_EROFS_WORK_TAIL or the next chained work */
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
	DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	DBG_BUGON(erofs_blkoff(map->m_pa));

repeat:
	work = z_erofs_vle_work_lookup(&finder);
	if (work) {
		unsigned int orig_llen;

		/* increase workgroup `llen' if needed */
		while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
		       orig_llen != cmpxchg_relaxed(&grp->llen,
						    orig_llen, map->m_llen))
			cpu_relax();
		goto got_it;
	}

	work = z_erofs_vle_work_register(&finder, map);
	if (unlikely(work == ERR_PTR(-EAGAIN)))
		goto repeat;

	if (unlikely(IS_ERR(work)))
		return PTR_ERR(work);
got_it:
	z_erofs_pagevec_ctor_init(&builder->vector,
				  Z_EROFS_VLE_INLINE_PAGEVECS,
				  work->pagevec, work->vcnt);

	if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
		/* enable possibly in-place decompression */
		builder->compressed_pages = grp->compressed_pages;
		builder->compressed_deficit = clusterpages;
	} else {
		builder->compressed_pages = NULL;
		builder->compressed_deficit = 0;
	}

	builder->grp = grp;
	builder->work = work;
	return 0;
}

/*
 * keep in mind that referenced workgroups will be freed only after
 * an RCU grace period, so rcu_read_lock() can prevent a workgroup
 * from being freed.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_vle_work *work = container_of(head,
		struct z_erofs_vle_work, rcu);
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	kmem_cache_free(z_erofs_workgroup_cachep, grp);
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
		struct z_erofs_vle_workgroup, obj);
	struct z_erofs_vle_work *const work = &vgrp->work;

	call_rcu(&work->rcu, z_erofs_rcu_callback);
}

static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
				       struct z_erofs_vle_work *work __maybe_unused)
{
	erofs_workgroup_put(&grp->obj);
}

void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
{
	struct z_erofs_vle_workgroup *grp =
		z_erofs_vle_work_workgroup(work, true);

	__z_erofs_vle_work_release(grp, work);
}

static inline bool
z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
{
	struct z_erofs_vle_work *work = builder->work;

	if (!work)
		return false;

	z_erofs_pagevec_ctor_exit(&builder->vector, false);
	mutex_unlock(&work->lock);

	/*
	 * if all pending pages are added, don't hold work reference
	 * any longer if the current work isn't hosted by ourselves.
	 */
	if (!builder->hosted)
		__z_erofs_vle_work_release(builder->grp, work);

	builder->work = NULL;
	return true;
}

static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp);

	if (unlikely(!page))
		return NULL;

	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}

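/*
 * Reader's note: pages allocated above are "staging" pages, recognizable by
 * the special Z_EROFS_MAPPING_STAGING mapping.  They never belong to any
 * file and are recycled into the page pool by
 * z_erofs_gather_if_stagingpage() once decompression is done.
 */
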
struct z_erofs_vle_frontend {
	struct inode *const inode;

	struct z_erofs_vle_work_builder builder;
	struct erofs_map_blocks_iter m_iter;

	z_erofs_vle_owned_workgrp_t owned_head;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
};

#define VLE_FRONTEND_INIT(__i) { \
	.inode = __i, \
	.m_iter = { \
		{ .m_llen = 0, .m_plen = 0 }, \
		.mpage = NULL \
	}, \
	.builder = VLE_WORK_BUILDER_INIT(), \
	.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
	.backmost = true, }

#ifdef EROFS_FS_HAS_MANAGED_CACHE
static inline bool
should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
{
	if (fe->backmost)
		return true;

	if (EROFS_FS_ZIP_CACHE_LVL >= 2)
		return la < fe->headoffset;

	return false;
}
#else
static inline bool
should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
{
	return false;
}
#endif

static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
				struct page *page,
				struct list_head *page_pool)
{
	struct super_block *const sb = fe->inode->i_sb;
	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
	struct erofs_map_blocks_iter *const m = &fe->m_iter;
	struct erofs_map_blocks *const map = &m->map;
	struct z_erofs_vle_work_builder *const builder = &fe->builder;
	const loff_t offset = page_offset(page);

	bool tight = builder_is_followed(builder);
	struct z_erofs_vle_work *work = builder->work;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen)
		goto hitted;

	/* move on to the next map_blocks */
	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_vle_work_iter_end(builder))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
	if (unlikely(err))
		goto err_out;

	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
		goto hitted;

	DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
	DBG_BUGON(erofs_blkoff(map->m_pa));

	err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
	if (unlikely(err))
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, map->m_la))
		cache_strategy = DELAYEDALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(builder, MNGD_MAPPING(sbi),
				 map->m_pa / PAGE_SIZE,
				 map->m_plen / PAGE_SIZE,
				 cache_strategy, page_pool, GFP_KERNEL);

	tight &= builder_is_followed(builder);
	work = builder->work;
hitted:
	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

retry:
	err = z_erofs_vle_work_add_page(builder, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(page_pool, GFP_NOFS);

		err = z_erofs_vle_work_add_page(builder,
			newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (likely(!err))
			goto retry;
	}

	if (unlikely(err))
		goto err_out;

	index = page->index - map->m_la / PAGE_SIZE;

	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of spiltted parts of a page */
	++spiltted;
	/* also update nr_pages */
	work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	/* FIXME! avoid the last redundant fixup & endio */
	z_erofs_onlinepage_endio(page);

	debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		__func__, page, spiltted, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}

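/*
 * Reader's note on the function below: `bios' is a delta applied to
 * io->pending_bios; submission adds the number of issued bios and each
 * completion passes -1.  When the count drops to zero, the foreground case
 * wakes the waiter while the background case queues the decompression work.
 */
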
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
{
	tagptr1_t t = tagptr_init(tagptr1_t, ptr);
	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
	bool background = tagptr_unfold_tags(t);

	if (!background) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}

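/*
 * Completion handler for compressed-data bios: it marks managed-cache pages
 * uptodate (or errored) and finally drops one pending-bio reference via
 * z_erofs_vle_unzip_kickoff(bio->bi_private, -1).
 */
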
static inline void z_erofs_vle_read_endio(struct bio *bio)
{
	const blk_status_t err = bio->bi_status;
	unsigned int i;
	struct bio_vec *bvec;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct address_space *mc = NULL;
#endif

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		bool cachemngd = false;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
			struct inode *const inode = page->mapping->host;
			struct super_block *const sb = inode->i_sb;

			mc = MNGD_MAPPING(EROFS_SB(sb));
		}

		/*
		 * If mc has not been set up yet, it stays NULL; however,
		 * page->mapping is never NULL if everything works properly.
		 */
		cachemngd = (page->mapping == mc);
#endif

		if (unlikely(err))
			SetPageError(page);
		else if (cachemngd)
			SetPageUptodate(page);

		if (cachemngd)
			unlock_page(page);
	}

	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
	bio_put(bio);
}

static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);

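/*
 * Reader's note: z_pagemap_global is a single shared scratch array used when
 * a decompressed extent needs more page slots than fit on the stack, either
 * opportunistically (mutex_trylock) or as a last resort when kvmalloc fails;
 * z_pagemap_global_lock serializes its users.
 */
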
static int z_erofs_vle_unzip(struct super_block *sb,
			     struct z_erofs_vle_workgroup *grp,
			     struct list_head *page_pool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);

	struct z_erofs_pagevec_ctor ctor;
	unsigned int nr_pages;
	unsigned int sparsemem_pages = 0;
	struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;
	unsigned int i, llen;

	enum z_erofs_page_type page_type;
	bool overlapped;
	struct z_erofs_vle_work *work;
	void *vout;
	int err;

	might_sleep();
	work = z_erofs_vle_grab_primary_work(grp);
	DBG_BUGON(!READ_ONCE(work->nr_pages));

	mutex_lock(&work->lock);
	nr_pages = work->nr_pages;

	if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
		pages = pages_onstack;
	else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
		 mutex_trylock(&z_pagemap_global_lock))
		pages = z_pagemap_global;
	else {
repeat:
		pages = kvmalloc_array(nr_pages,
				       sizeof(struct page *), GFP_KERNEL);

		/* fallback to global pagemap for the lowmem scenario */
		if (unlikely(!pages)) {
			if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
				goto repeat;

			mutex_lock(&z_pagemap_global_lock);
			pages = z_pagemap_global;
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	z_erofs_pagevec_ctor_init(&ctor,
				  Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);

	for (i = 0; i < work->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);
		DBG_BUGON(pages[pagenr]);

		pages[pagenr] = page;
	}
	sparsemem_pages = i;

	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = grp->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_is_stagingpage(page))
			continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page->mapping == MNGD_MAPPING(sbi)) {
			DBG_BUGON(!PageUptodate(page));
			continue;
		}
#endif

		/* only non-head page could be reused as a compressed page */
		pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);
		DBG_BUGON(pages[pagenr]);
		++sparsemem_pages;
		pages[pagenr] = page;

		overlapped = true;
	}

	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;

	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
		err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
					     pages, nr_pages, work->pageofs);
		goto out;
	}

	if (llen > grp->llen)
		llen = grp->llen;

	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
		clusterpages, pages, llen, work->pageofs,
		z_erofs_onlinepage_endio);
	if (err != -ENOTSUPP)
		goto out_percpu;

	if (sparsemem_pages >= nr_pages)
		goto skip_allocpage;

	for (i = 0; i < nr_pages; ++i) {
		if (pages[i])
			continue;

		pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
	}

skip_allocpage:
	vout = erofs_vmap(pages, nr_pages);

	err = z_erofs_vle_unzip_vmap(compressed_pages,
		clusterpages, vout, llen, work->pageofs, overlapped);

	erofs_vunmap(vout, nr_pages);

out:
	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		DBG_BUGON(!page->mapping);

		/* recycle all individual staging pages */
		if (z_erofs_gather_if_stagingpage(page_pool, page))
			continue;

		if (unlikely(err < 0))
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

out_percpu:
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

#ifdef EROFS_FS_HAS_MANAGED_CACHE
		if (page->mapping == MNGD_MAPPING(sbi))
			continue;
#endif
		/* recycle all individual staging pages */
		(void)z_erofs_gather_if_stagingpage(page_pool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (unlikely(pages != pages_onstack))
		kvfree(pages);

	work->nr_pages = 0;
	work->vcnt = 0;

	/* all work locks MUST be taken before the following line */

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);

	/* all work locks SHOULD be released right now */
	mutex_unlock(&work->lock);

	z_erofs_vle_work_release(work);
	return err;
}

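/*
 * Reader's summary of the function below: walk an owned chain starting from
 * io->head and decompress every workgroup on it; the chain is expected to be
 * closed, i.e. terminated by Z_EROFS_VLE_WORKGRP_TAIL_CLOSED.
 */
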
static void z_erofs_vle_unzip_all(struct super_block *sb,
				  struct z_erofs_vle_unzip_io *io,
				  struct list_head *page_pool)
{
	z_erofs_vle_owned_workgrp_t owned = io->head;

	while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
		struct z_erofs_vle_workgroup *grp;

		/* impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);

		grp = container_of(owned, struct z_erofs_vle_workgroup, next);
		owned = READ_ONCE(grp->next);

		z_erofs_vle_unzip(sb, grp, page_pool);
	}
}

static void z_erofs_vle_unzip_wq(struct work_struct *work)
{
	struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
		struct z_erofs_vle_unzip_io_sb, io.u.work);
	LIST_HEAD(page_pool);

	DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);

	put_pages_list(&page_pool);
	kvfree(iosb);
}

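/*
 * Reader's summary of the function below: for compressed page slot `nr' of a
 * workgroup it either (1) reuses a page already in the managed cache
 * (possibly `justfound' by preload_compressed_pages), (2) turns a
 * PAGE_UNALLOCATED placeholder into a freshly allocated cache page, or
 * (3) falls back to a staging page, republishing the result into
 * compressed_pages[nr] with cmpxchg.
 */
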
static struct page *
pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
			   unsigned int nr,
			   struct list_head *pagepool,
			   struct address_space *mc,
			   gfp_t gfp)
{
	/* determined at compile time to avoid too many #ifdefs */
	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
	const pgoff_t index = grp->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(grp->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated and
	 * a placeholder is out there, prepare it now.
	 */
	if (!nocache && page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	mapping = READ_ONCE(page->mapping);

	/*
	 * if managed cache is disabled, there is no way to
	 * get such a cached-like page.
	 */
	if (nocache) {
		/* if managed cache is disabled, `justfound' is impossible */
		DBG_BUGON(justfound);

		/* and it should be locked, not uptodate, and not truncated */
		DBG_BUGON(!PageLocked(page));
		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!mapping);
		goto out;
	}

	/*
	 * unmanaged (file) pages are all locked solidly,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(grp->compressed_pages[nr], page);

		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)grp);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = __stagingpage_alloc(pagepool, gfp);
	if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
		list_add(&page->lru, pagepool);
		cpu_relax();
		goto repeat;
	}
	if (nocache || !tocache)
		goto out;
	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		page->mapping = Z_EROFS_MAPPING_STAGING;
		goto out;
	}

	set_page_private(page, (unsigned long)grp);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_vle_unzip_io *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_vle_unzip_io *io,
	      bool foreground)
{
	struct z_erofs_vle_unzip_io_sb *iosb;

	if (foreground) {
		/* waitqueue available for foreground io */
		DBG_BUGON(!io);

		init_waitqueue_head(&io->u.wait);
		atomic_set(&io->pending_bios, 0);
		goto out;
	}

	iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
			GFP_KERNEL | __GFP_NOFAIL);
	DBG_BUGON(!iosb);

	/* initialize fields in the allocated descriptor */
	io = &iosb->io;
	iosb->sb = sb;
	INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
out:
	io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
	return io;
}

/* define workgroup jobqueue types */
enum {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	JQ_BYPASS,
#endif
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      z_erofs_vle_owned_workgrp_t qtail[],
			      struct z_erofs_vle_unzip_io *q[],
			      struct z_erofs_vle_unzip_io *fgq,
			      bool forcefg)
{
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	/*
	 * if managed cache is enabled, a bypass jobqueue is needed,
	 * no need to read from device for all workgroups in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
#endif

	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
}

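/*
 * Reader's note: the returned cookie is a tagptr1_t folding the submit queue
 * pointer with a 1-bit "background" flag (!forcefg); it travels through
 * bio->bi_private and is unfolded again in z_erofs_vle_unzip_kickoff().
 */
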
#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
				    z_erofs_vle_owned_workgrp_t qtail[],
				    z_erofs_vle_owned_workgrp_t owned_head)
{
	z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
	if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
		owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;

	WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &grp->next);

	qtail[JQ_BYPASS] = &grp->next;
}

static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue workqueue for decompression but drop it directly instead.
	 */
	if (force_fg || nr_bios)
		return false;

	kvfree(container_of(q[JQ_SUBMIT],
			    struct z_erofs_vle_unzip_io_sb,
			    io));
	return true;
}
#else
static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
				    z_erofs_vle_owned_workgrp_t qtail[],
				    z_erofs_vle_owned_workgrp_t owned_head)
{
	/* impossible to bypass submission for managed cache disabled */
	DBG_BUGON(1);
}

static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
				       unsigned int nr_bios,
				       bool force_fg)
{
	/* bios should be >0 if managed cache is disabled */
	DBG_BUGON(!nr_bios);
	return false;
}
#endif

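/*
 * Reader's summary: walk the owned chain once, pick up (or allocate) every
 * compressed page of each workgroup, batch physically contiguous pages into
 * as few read bios as possible, and divert fully-cached workgroups onto the
 * bypass jobqueue so they can be decompressed without any io.
 */
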
static bool z_erofs_vle_submit_all(struct super_block *sb,
				   z_erofs_vle_owned_workgrp_t owned_head,
				   struct list_head *pagepool,
				   struct z_erofs_vle_unzip_io *fgq,
				   bool force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = erofs_clusterpages(sbi);
	const gfp_t gfp = GFP_NOFS;

	z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
	struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
	struct bio *bio;
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;

	if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
		return false;

	force_submit = false;
	bio = NULL;
	nr_bios = 0;
	bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_vle_workgroup *grp;
		pgoff_t first_index;
		struct page *page;
		unsigned int i = 0, bypass = 0;
		int err;

		/* 'owned_head' should never equal any of the following */
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);

		grp = container_of(owned_head,
				   struct z_erofs_vle_workgroup, next);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
				     Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);

		first_index = grp->obj.index;
		force_submit |= (first_index != last_index + 1);

repeat:
		page = pickup_page_for_submission(grp, i, pagepool,
						  MNGD_MAPPING(sbi), gfp);
		if (!page) {
			force_submit = true;
			++bypass;
			goto skippage;
		}

		if (bio && force_submit) {
submit_bio_retry:
			__submit_bio(bio, REQ_OP_READ, 0);
			bio = NULL;
		}

		if (!bio) {
			bio = erofs_grab_bio(sb, first_index + i,
					     BIO_MAX_PAGES,
					     z_erofs_vle_read_endio, true);
			bio->bi_private = bi_private;

			++nr_bios;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (err < PAGE_SIZE)
			goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
skippage:
		if (++i < clusterpages)
			goto repeat;

		if (bypass < clusterpages)
			qtail[JQ_SUBMIT] = &grp->next;
		else
			move_to_bypass_jobqueue(grp, qtail, owned_head);
	} while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);

	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);

	if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
		return true;

	z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
	return true;
}

static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
				     struct list_head *pagepool,
				     bool force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];

	if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
		return;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
#endif
	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_event(io[JQ_SUBMIT].u.wait,
		   !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* do the synchronous decompression now */
	z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
}

static int z_erofs_vle_normalaccess_readpage(struct file *file,
					     struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	(void)z_erofs_vle_work_iter_end(&f.builder);

	if (err) {
		errln("%s, failed to read, err [%d]", __func__, err);
		goto out;
	}

	z_erofs_submit_and_unzip(&f, &pagepool, true);
out:
	if (f.m_iter.mpage)
		put_page(f.m_iter.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static int z_erofs_vle_normalaccess_readpages(struct file *filp,
					      struct address_space *mapping,
					      struct list_head *pages,
					      unsigned int nr_pages)
{
	struct inode *const inode = mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	bool sync = __should_decompress_synchronously(sbi, nr_pages);
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	struct page *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(mapping->host, lru_to_page(pages),
			      nr_pages, false);

	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;

	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/*
		 * A pure asynchronous readahead is indicated if
		 * a PG_readahead marked page is hit at first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);
			continue;
		}

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err) {
			struct erofs_vnode *vi = EROFS_V(inode);

			errln("%s, readahead error at page %lu of nid %llu",
			      __func__, page->index, vi->nid);
		}

		put_page(page);
	}

	(void)z_erofs_vle_work_iter_end(&f.builder);

	z_erofs_submit_and_unzip(&f, &pagepool, sync);

	if (f.m_iter.mpage)
		put_page(f.m_iter.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return 0;
}

const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};

/*
 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
 *
 * VLE compression mode attempts to compress a variable amount of logical
 * data into a physical cluster with a fixed size.
 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
 */
#define __vle_cluster_advise(x, bit, bits) \
	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))

#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)

#define vle_cluster_type(di)	\
	__vle_cluster_type((di)->di_advise)

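/*
 * Worked example (assuming the type field occupies the low bits of the
 * little-endian di_advise, as the *_BIT/*_BITS constants describe):
 * __vle_cluster_advise(x, bit, bits) decodes the 16-bit value, shifts it
 * right by `bit' and masks off `bits' bits, so a 2-bit type field at bit 0
 * of di_advise == 0x0002 yields Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD (2).
 */
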
static int
vle_decompressed_index_clusterofs(unsigned int *clusterofs,
				  unsigned int clustersize,
				  struct z_erofs_vle_decompressed_index *di)
{
	switch (vle_cluster_type(di)) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		*clusterofs = clustersize;
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		*clusterofs = le16_to_cpu(di->di_clusterofs);
		break;
	default:
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

static inline erofs_blk_t
vle_extent_blkaddr(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blknr(iloc(sbi, vi->nid) + ofs);
}

static inline unsigned int
vle_extent_blkoff(struct inode *inode, pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
	struct erofs_vnode *vi = EROFS_V(inode);

	unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
		vi->xattr_isize) + sizeof(struct erofs_extent_header) +
		index * sizeof(struct z_erofs_vle_decompressed_index);

	return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
}

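/*
 * Reader's note: the two helpers above compute the same byte position, the
 * location of the `index'-th z_erofs_vle_decompressed_index record behind
 * the inode base (iloc) plus the aligned inode/xattr area and the extent
 * header; blkaddr yields the containing block while blkoff yields the
 * offset inside that block.
 */
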
struct vle_map_blocks_iter_ctx {
	struct inode *inode;
	struct super_block *sb;
	unsigned int clusterbits;

	struct page **mpage_ret;
	void **kaddr_ret;
};

static int
vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
			    unsigned int lcn,	/* logical cluster number */
			    unsigned long long *ofs,
			    erofs_blk_t *pblk,
			    unsigned int *flags)
{
	const unsigned int clustersize = 1 << ctx->clusterbits;
	const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
	struct page *mpage = *ctx->mpage_ret;	/* extent metapage */

	struct z_erofs_vle_decompressed_index *di;
	unsigned int cluster_type, delta0;

	if (mpage->index != mblk) {
		kunmap_atomic(*ctx->kaddr_ret);
		unlock_page(mpage);
		put_page(mpage);

		mpage = erofs_get_meta_page(ctx->sb, mblk, false);
		if (IS_ERR(mpage)) {
			*ctx->mpage_ret = NULL;
			return PTR_ERR(mpage);
		}
		*ctx->mpage_ret = mpage;
		*ctx->kaddr_ret = kmap_atomic(mpage);
	}

	di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);

	cluster_type = vle_cluster_type(di);
	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		delta0 = le16_to_cpu(di->di_u.delta[0]);
		if (unlikely(!delta0 || delta0 > lcn)) {
			errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
			      delta0, lcn, EROFS_V(ctx->inode)->nid);
			DBG_BUGON(1);
			return -EIO;
		}
		return vle_get_logical_extent_head(ctx,
			lcn - delta0, ofs, pblk, flags);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		*flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/* clustersize should be a power of two */
		*ofs = ((u64)lcn << ctx->clusterbits) +
		       (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
		*pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		errln("unknown cluster type %u at lcn %u of nid %llu",
		      cluster_type, lcn, EROFS_V(ctx->inode)->nid);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    struct page **mpage_ret, int flags)
{
	void *kaddr;
	const struct vle_map_blocks_iter_ctx ctx = {
		.inode = inode,
		.sb = inode->i_sb,
		.clusterbits = EROFS_I_SB(inode)->clusterbits,
		.mpage_ret = mpage_ret,
		.kaddr_ret = &kaddr
	};
	const unsigned int clustersize = 1 << ctx.clusterbits;
	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
	const bool initial = !map->m_llen;

	/* logical extent (start, end) offset */
	unsigned long long ofs, end;
	unsigned int lcn;
	u32 ofs_rem;

	/* initialize `pblk' to keep gcc from printing foolish warnings */
	erofs_blk_t mblk, pblk = 0;
	struct page *mpage = *mpage_ret;
	struct z_erofs_vle_decompressed_index *di;
	unsigned int cluster_type, logical_cluster_ofs;
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (unlikely(map->m_la >= inode->i_size)) {
		DBG_BUGON(!initial);
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	debugln("%s, m_la %llu m_llen %llu --- start", __func__,
		map->m_la, map->m_llen);

	ofs = map->m_la + map->m_llen;

	/* clustersize should be power of two */
	lcn = ofs >> ctx.clusterbits;
	ofs_rem = ofs & (clustersize - 1);

	mblk = vle_extent_blkaddr(inode, lcn);

	if (!mpage || mpage->index != mblk) {
		if (mpage)
			put_page(mpage);

		mpage = erofs_get_meta_page(ctx.sb, mblk, false);
		if (IS_ERR(mpage)) {
			err = PTR_ERR(mpage);
			goto out;
		}
		*mpage_ret = mpage;
	} else {
		lock_page(mpage);
		DBG_BUGON(!PageUptodate(mpage));
	}

	kaddr = kmap_atomic(mpage);
	di = kaddr + vle_extent_blkoff(inode, lcn);

	debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
		mblk, vle_extent_blkoff(inode, lcn));

	err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
						clustersize, di);
	if (unlikely(err))
		goto unmap_out;

	if (!initial) {
		/* [walking mode] 'map' has been already initialized */
		map->m_llen += logical_cluster_ofs;
		goto unmap_out;
	}

	/* by default, compressed */
	map->m_flags |= EROFS_MAP_ZIPPED;

	end = ((u64)lcn + 1) * clustersize;

	cluster_type = vle_cluster_type(di);

	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (ofs_rem >= logical_cluster_ofs)
			map->m_flags ^= EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (ofs_rem == logical_cluster_ofs) {
			pblk = le32_to_cpu(di->di_u.blkaddr);
			goto exact_hitted;
		}

		if (ofs_rem > logical_cluster_ofs) {
			ofs = (u64)lcn * clustersize | logical_cluster_ofs;
			pblk = le32_to_cpu(di->di_u.blkaddr);
			break;
		}

		/* logical cluster number should be >= 1 */
		if (unlikely(!lcn)) {
			errln("invalid logical cluster 0 at nid %llu",
			      EROFS_V(inode)->nid);
			err = -EIO;
			goto unmap_out;
		}
		end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
						  &pblk, &map->m_flags);
		mpage = *mpage_ret;

		if (unlikely(err)) {
			if (mpage)
				goto unmap_out;
			goto out;
		}
		break;
	default:
		errln("unknown cluster type %u at offset %llu of nid %llu",
		      cluster_type, ofs, EROFS_V(inode)->nid);
		err = -EIO;
		goto unmap_out;
	}

	map->m_la = ofs;
exact_hitted:
	map->m_llen = end - ofs;
	map->m_plen = clustersize;
	map->m_pa = blknr_to_addr(pblk);
	map->m_flags |= EROFS_MAP_MAPPED;
unmap_out:
	kunmap_atomic(kaddr);
	unlock_page(mpage);
out:
	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		__func__, map->m_la, map->m_pa,
		map->m_llen, map->m_plen, map->m_flags);

	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}