1 // SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
13 #include "unzip_vle.h"
14 #include <linux/prefetch.h>
16 #include <trace/events/erofs.h>
/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 */
22 #define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
24 /* how to allocate cached pages for a workgroup */
/* how to allocate cached pages for a workgroup */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};
/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
34 typedef tagptr1_t compressed_page_t;
36 #define tag_compressed_page_justfound(page) \
37 tagptr_fold(compressed_page_t, page, 1)
39 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
40 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
42 void z_erofs_exit_zip_subsystem(void)
44 destroy_workqueue(z_erofs_workqueue);
45 kmem_cache_destroy(z_erofs_workgroup_cachep);
48 static inline int init_unzip_workqueue(void)
50 const unsigned int onlinecpus = num_possible_cpus();
53 * we don't need too many threads, limiting threads
54 * could improve scheduling performance.
57 alloc_workqueue("erofs_unzipd",
58 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
59 onlinecpus + onlinecpus / 4);
61 return z_erofs_workqueue ? 0 : -ENOMEM;
64 static void init_once(void *ptr)
66 struct z_erofs_vle_workgroup *grp = ptr;
67 struct z_erofs_vle_work *const work =
68 z_erofs_vle_grab_primary_work(grp);
71 mutex_init(&work->lock);
74 for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
75 grp->compressed_pages[i] = NULL;
78 static void init_always(struct z_erofs_vle_workgroup *grp)
80 struct z_erofs_vle_work *const work =
81 z_erofs_vle_grab_primary_work(grp);
83 atomic_set(&grp->obj.refcount, 1);
86 DBG_BUGON(work->nr_pages);
87 DBG_BUGON(work->vcnt);
90 int __init z_erofs_init_zip_subsystem(void)
92 z_erofs_workgroup_cachep =
93 kmem_cache_create("erofs_compress",
94 Z_EROFS_WORKGROUP_SIZE, 0,
95 SLAB_RECLAIM_ACCOUNT, init_once);
97 if (z_erofs_workgroup_cachep) {
98 if (!init_unzip_workqueue())
101 kmem_cache_destroy(z_erofs_workgroup_cachep);
enum z_erofs_vle_work_role {
	Z_EROFS_VLE_WORK_SECONDARY,
	Z_EROFS_VLE_WORK_PRIMARY,
	/*
	 * The current work was the tail of an exist chain, and the previous
	 * processed chained works are all decided to be hooked up to it.
	 * A new chain should be created for the remaining unprocessed works,
	 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
	 * the next work cannot reuse the whole page in the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * | (belongs to the next work)   |  (belongs to the current work)  |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
	/*
	 * The current work has been linked with the processed chained works,
	 * and could be also linked with the potential remaining works, which
	 * means if the processing page is the tail partial page of the work,
	 * the current work can safely use the whole page (since the next work
	 * is under control) for in-place decompression, as illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page  |          head (partial) page           |
	 * | (of the current work) |         (of the previous work)         |
	 * |  PRIMARY_FOLLOWED or  |                                        |
	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used for the current work itself.  ]
	 */
	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
	Z_EROFS_VLE_WORK_MAX
};
139 struct z_erofs_vle_work_builder {
140 enum z_erofs_vle_work_role role;
142 * 'hosted = false' means that the current workgroup doesn't belong to
143 * the owned chained workgroups. In the other words, it is none of our
144 * business to submit this workgroup.
148 struct z_erofs_vle_workgroup *grp;
149 struct z_erofs_vle_work *work;
150 struct z_erofs_pagevec_ctor vector;
152 /* pages used for reading the compressed data */
153 struct page **compressed_pages;
154 unsigned int compressed_deficit;
157 #define VLE_WORK_BUILDER_INIT() \
158 { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
160 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * Reserve slots in compressed_pages[] for a primary-followed work before I/O:
 * already-cached pages are picked up from the managed cache @mc with a
 * "justfound" tag, DELAYEDALLOC marks slots with the PAGE_UNALLOCATED
 * placeholder, DONTALLOC leaves them empty.  If any slot stays unfilled the
 * role is downgraded to plain PRIMARY (no whole-page reuse).
 * NOTE(review): this extracted listing has gaps (missing lines and braces
 * kept verbatim); only comments were added here.
 */
161 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
162 struct address_space *mc,
164 unsigned int clusterpages,
165 enum z_erofs_cache_alloctype type,
166 struct list_head *pagepool,
169 struct page **const pages = bl->compressed_pages;
170 const unsigned int remaining = bl->compressed_deficit;
171 bool standalone = true;
172 unsigned int i, j = 0;
/* only PRIMARY_FOLLOWED works may own the whole compressed page set */
174 if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
177 gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
179 index += clusterpages - remaining;
181 for (i = 0; i < remaining; ++i) {
185 /* the compressed page was loaded before */
186 if (READ_ONCE(pages[i]))
189 page = find_get_page(mc, index + i);
192 t = tag_compressed_page_justfound(page);
193 } else if (type == DELAYEDALLOC) {
194 t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
195 } else { /* DONTALLOC */
/* publish the tagged pointer only if the slot is still empty */
202 if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
208 bl->compressed_pages += j;
209 bl->compressed_deficit = remaining - j;
/* some slots left unfilled: cannot run fully standalone, downgrade role */
212 bl->role = Z_EROFS_VLE_WORK_PRIMARY;
215 /* called by erofs_shrinker to get rid of all compressed_pages */
/*
 * Detach every managed-cache page from a frozen workgroup so the shrinker
 * can reclaim them.  Pages that cannot be trylocked are skipped.
 * NOTE(review): extracted listing with gaps; comments only added.
 */
216 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
217 struct erofs_workgroup *egrp)
219 struct z_erofs_vle_workgroup *const grp =
220 container_of(egrp, struct z_erofs_vle_workgroup, obj);
221 struct address_space *const mapping = MNGD_MAPPING(sbi);
222 const int clusterpages = erofs_clusterpages(sbi);
226 * refcount of workgroup is now freezed as 1,
227 * therefore no need to worry about available decompression users.
229 for (i = 0; i < clusterpages; ++i) {
230 struct page *page = grp->compressed_pages[i];
/* only pages owned by the managed cache are candidates */
232 if (!page || page->mapping != mapping)
235 /* block other users from reclaiming or migrating the page */
236 if (!trylock_page(page))
239 /* barrier is implied in the following 'unlock_page' */
240 WRITE_ONCE(grp->compressed_pages[i], NULL);
242 set_page_private(page, 0);
243 ClearPagePrivate(page);
/*
 * Per-page reclaim hook: drop one cached compressed page from its
 * workgroup if the workgroup can be frozen (no concurrent users).
 * Returns nonzero when the page was released, 0 when busy.
 * NOTE(review): extracted listing with gaps; comments only added.
 */
251 int erofs_try_to_free_cached_page(struct address_space *mapping,
254 struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
255 const unsigned int clusterpages = erofs_clusterpages(sbi);
256 struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
257 int ret = 0; /* 0 - busy */
259 if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
262 for (i = 0; i < clusterpages; ++i) {
263 if (grp->compressed_pages[i] == page) {
264 WRITE_ONCE(grp->compressed_pages[i], NULL);
269 erofs_workgroup_unfreeze(&grp->obj, 1);
272 ClearPagePrivate(page);
279 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
280 struct address_space *mc,
282 unsigned int clusterpages,
283 enum z_erofs_cache_alloctype type,
284 struct list_head *pagepool,
287 /* nowhere to load compressed pages from */
291 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
292 static inline bool try_to_reuse_as_compressed_page(
293 struct z_erofs_vle_work_builder *b,
296 while (b->compressed_deficit) {
297 --b->compressed_deficit;
298 if (!cmpxchg(b->compressed_pages++, NULL, page))
305 /* callers must be with work->lock held */
306 static int z_erofs_vle_work_add_page(
307 struct z_erofs_vle_work_builder *builder,
309 enum z_erofs_page_type type)
314 /* give priority for the compressed data storage */
315 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
316 type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
317 try_to_reuse_as_compressed_page(builder, page))
320 ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
321 page, type, &occupied);
322 builder->work->vcnt += (unsigned int)ret;
324 return ret ? 0 : -EAGAIN;
327 static enum z_erofs_vle_work_role
328 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
329 z_erofs_vle_owned_workgrp_t *owned_head,
332 DBG_BUGON(*hosted == true);
334 /* let's claim these following types of workgroup */
336 if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
337 /* type 1, nil workgroup */
338 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
339 *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
342 *owned_head = &grp->next;
344 /* lucky, I am the followee :) */
345 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
347 } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
349 * type 2, link to the end of a existing open chain,
350 * be careful that its submission itself is governed
351 * by the original owned chain.
353 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
354 *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
356 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
357 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
360 return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
363 struct z_erofs_vle_work_finder {
364 struct super_block *sb;
366 unsigned int pageofs;
368 struct z_erofs_vle_workgroup **grp_ret;
369 enum z_erofs_vle_work_role *role;
370 z_erofs_vle_owned_workgrp_t *owned_head;
/*
 * Look up an existing workgroup for (f->idx, f->pageofs) and, with its
 * work->lock held, claim it into the caller's owned chain (setting
 * *f->role accordingly).  Returns the locked work or NULL if not found.
 * NOTE(review): extracted listing with gaps; comments only added.
 */
374 static struct z_erofs_vle_work *
375 z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
378 struct erofs_workgroup *egrp;
379 struct z_erofs_vle_workgroup *grp;
380 struct z_erofs_vle_work *work;
382 egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
388 grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
391 work = z_erofs_vle_grab_work(grp, f->pageofs);
392 /* if multiref is disabled, `primary' is always true */
395 DBG_BUGON(work->pageofs != f->pageofs);
398 * lock must be taken first to avoid grp->next == NIL between
399 * claiming workgroup and adding pages:
403 * mutex_lock(&work->lock)
404 * add all pages to pagevec
406 * [correct locking case 1]:
407 * mutex_lock(grp->work[a])
409 * mutex_lock(grp->work[b]) mutex_lock(grp->work[c])
410 * ... *role = SECONDARY
411 * add all pages to pagevec
413 * mutex_unlock(grp->work[c])
414 * mutex_lock(grp->work[c])
419 * [correct locking case 2]:
420 * mutex_lock(grp->work[b])
422 * mutex_lock(grp->work[a])
424 * mutex_lock(grp->work[c])
428 * mutex_lock(grp->work[a])
429 * *role = PRIMARY_OWNER
430 * add all pages to pagevec
433 mutex_lock(&work->lock);
437 *f->role = Z_EROFS_VLE_WORK_SECONDARY;
438 else /* claim the workgroup if possible */
439 *f->role = try_to_claim_workgroup(grp, f->owned_head,
/*
 * Allocate and register a fresh workgroup for the mapped extent @map,
 * returning its primary work locked and claimed as PRIMARY_FOLLOWED.
 * May return ERR_PTR(-EAGAIN) when registration races (caller retries),
 * -ENOMEM on allocation failure.
 * NOTE(review): extracted listing with gaps; comments only added.
 */
444 static struct z_erofs_vle_work *
445 z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
446 struct erofs_map_blocks *map)
449 struct z_erofs_vle_workgroup *grp = *f->grp_ret;
450 struct z_erofs_vle_work *work;
452 /* if multiref is disabled, grp should never be nullptr */
455 return ERR_PTR(-EINVAL);
458 /* no available workgroup, let's allocate one */
459 grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
461 return ERR_PTR(-ENOMEM);
464 grp->obj.index = f->idx;
465 grp->llen = map->m_llen;
467 z_erofs_vle_set_workgrp_fmt(grp,
468 (map->m_flags & EROFS_MAP_ZIPPED) ?
469 Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
470 Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
472 /* new workgrps have been claimed as type 1 */
473 WRITE_ONCE(grp->next, *f->owned_head);
474 /* primary and followed work for all new workgrps */
475 *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
476 /* it should be submitted by ourselves */
480 work = z_erofs_vle_grab_primary_work(grp);
481 work->pageofs = f->pageofs;
484 * lock all primary followed works before visible to others
485 * and mutex_trylock *never* fails for a new workgroup.
487 mutex_trylock(&work->lock);
490 int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
/* lost the race: free our copy and let the caller retry the lookup */
493 mutex_unlock(&work->lock);
494 kmem_cache_free(z_erofs_workgroup_cachep, grp);
495 return ERR_PTR(-EAGAIN);
499 *f->owned_head = &grp->next;
504 #define builder_is_hooked(builder) \
505 ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
507 #define builder_is_followed(builder) \
508 ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
/*
 * Begin iterating on the work covering the extent described by @map:
 * look up (or register) the workgroup, extend its logical length if the
 * new mapping is longer, set up the inline pagevec ctor and wire the
 * builder to the compressed page slots for possible in-place use.
 * Returns 0 on success or a negative errno from lookup/registration.
 * NOTE(review): extracted listing with gaps (retry label and some
 * branches are missing); comments only added.
 */
510 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
511 struct super_block *sb,
512 struct erofs_map_blocks *map,
513 z_erofs_vle_owned_workgrp_t *owned_head)
515 const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
516 struct z_erofs_vle_workgroup *grp;
517 const struct z_erofs_vle_work_finder finder = {
519 .idx = erofs_blknr(map->m_pa),
520 .pageofs = map->m_la & ~PAGE_MASK,
522 .role = &builder->role,
523 .owned_head = owned_head,
524 .hosted = &builder->hosted
526 struct z_erofs_vle_work *work;
528 DBG_BUGON(builder->work);
530 /* must be Z_EROFS_WORK_TAIL or the next chained work */
531 DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
532 DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
534 DBG_BUGON(erofs_blkoff(map->m_pa));
537 work = z_erofs_vle_work_lookup(&finder);
539 unsigned int orig_llen;
541 /* increase workgroup `llen' if needed */
542 while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
543 orig_llen != cmpxchg_relaxed(&grp->llen,
544 orig_llen, map->m_llen))
549 work = z_erofs_vle_work_register(&finder, map);
550 if (unlikely(work == ERR_PTR(-EAGAIN)))
554 return PTR_ERR(work);
556 z_erofs_pagevec_ctor_init(&builder->vector,
557 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
559 if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
560 /* enable possibly in-place decompression */
561 builder->compressed_pages = grp->compressed_pages;
562 builder->compressed_deficit = clusterpages;
564 builder->compressed_pages = NULL;
565 builder->compressed_deficit = 0;
569 builder->work = work;
574 * keep in mind that no referenced workgroups will be freed
575 * only after a RCU grace period, so rcu_read_lock() could
576 * prevent a workgroup from being freed.
578 static void z_erofs_rcu_callback(struct rcu_head *head)
580 struct z_erofs_vle_work *work = container_of(head,
581 struct z_erofs_vle_work, rcu);
582 struct z_erofs_vle_workgroup *grp =
583 z_erofs_vle_work_workgroup(work, true);
585 kmem_cache_free(z_erofs_workgroup_cachep, grp);
588 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
590 struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
591 struct z_erofs_vle_workgroup, obj);
592 struct z_erofs_vle_work *const work = &vgrp->work;
594 call_rcu(&work->rcu, z_erofs_rcu_callback);
597 static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
598 struct z_erofs_vle_work *work __maybe_unused)
600 erofs_workgroup_put(&grp->obj);
603 static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
605 struct z_erofs_vle_workgroup *grp =
606 z_erofs_vle_work_workgroup(work, true);
608 __z_erofs_vle_work_release(grp, work);
612 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
614 struct z_erofs_vle_work *work = builder->work;
619 z_erofs_pagevec_ctor_exit(&builder->vector, false);
620 mutex_unlock(&work->lock);
623 * if all pending pages are added, don't hold work reference
624 * any longer if the current work isn't hosted by ourselves.
626 if (!builder->hosted)
627 __z_erofs_vle_work_release(builder->grp, work);
629 builder->work = NULL;
634 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
637 struct page *page = erofs_allocpage(pagepool, gfp);
642 page->mapping = Z_EROFS_MAPPING_STAGING;
646 struct z_erofs_vle_frontend {
647 struct inode *const inode;
649 struct z_erofs_vle_work_builder builder;
650 struct erofs_map_blocks map;
652 z_erofs_vle_owned_workgrp_t owned_head;
654 /* used for applying cache strategy on the fly */
656 erofs_off_t headoffset;
659 #define VLE_FRONTEND_INIT(__i) { \
666 .builder = VLE_WORK_BUILDER_INIT(), \
667 .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
670 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * Decide whether managed cache pages should be DELAYEDALLOC'ed for data at
 * logical offset @la; with cache level >= 2 only data behind the request's
 * head offset qualifies.  The second copy is the non-managed-cache stub.
 * NOTE(review): signature/return lines and #else glue are missing from
 * this extracted listing; comments only added.
 */
672 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
677 if (EROFS_FS_ZIP_CACHE_LVL >= 2)
678 return la < fe->headoffset;
684 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
/*
 * Core per-page read path: split @page across one or more mapped extents,
 * (re)mapping blocks as needed, attaching each part to a work (allocating
 * a staging page when the inline pagevec overflows), preloading compressed
 * pages per the cache strategy, and tracking nr_pages/online-page state.
 * NOTE(review): extracted listing with substantial gaps (labels, error
 * paths and braces missing, kept verbatim); comments only added.
 */
690 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
692 struct list_head *page_pool)
694 struct super_block *const sb = fe->inode->i_sb;
695 struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
696 struct erofs_map_blocks *const map = &fe->map;
697 struct z_erofs_vle_work_builder *const builder = &fe->builder;
698 const loff_t offset = page_offset(page);
700 bool tight = builder_is_hooked(builder);
701 struct z_erofs_vle_work *work = builder->work;
703 enum z_erofs_cache_alloctype cache_strategy;
704 enum z_erofs_page_type page_type;
705 unsigned int cur, end, spiltted, index;
708 /* register locked file pages as online pages in pack */
709 z_erofs_onlinepage_init(page);
716 /* lucky, within the range of the current map_blocks */
717 if (offset + cur >= map->m_la &&
718 offset + cur < map->m_la + map->m_llen) {
719 /* didn't get a valid unzip work previously (very rare) */
725 /* go ahead the next map_blocks */
726 debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
728 if (z_erofs_vle_work_iter_end(builder))
729 fe->backmost = false;
731 map->m_la = offset + cur;
733 err = z_erofs_map_blocks_iter(fe->inode, map, 0);
738 if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
741 DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
742 DBG_BUGON(erofs_blkoff(map->m_pa));
744 err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
748 /* preload all compressed pages (maybe downgrade role if necessary) */
749 if (should_alloc_managed_pages(fe, map->m_la))
750 cache_strategy = DELAYEDALLOC;
752 cache_strategy = DONTALLOC;
754 preload_compressed_pages(builder, MNGD_MAPPING(sbi),
755 map->m_pa / PAGE_SIZE,
756 map->m_plen / PAGE_SIZE,
757 cache_strategy, page_pool, GFP_KERNEL);
759 tight &= builder_is_hooked(builder);
760 work = builder->work;
762 cur = end - min_t(unsigned int, offset + end - map->m_la, end);
763 if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
764 zero_user_segment(page, cur, end);
768 /* let's derive page type */
769 page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
770 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
771 (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
772 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
775 tight &= builder_is_followed(builder);
778 err = z_erofs_vle_work_add_page(builder, page, page_type);
779 /* should allocate an additional staging page for pagevec */
780 if (err == -EAGAIN) {
781 struct page *const newpage =
782 __stagingpage_alloc(page_pool, GFP_NOFS);
784 err = z_erofs_vle_work_add_page(builder,
785 newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
793 index = page->index - map->m_la / PAGE_SIZE;
795 /* FIXME! avoid the last relundant fixup & endio */
796 z_erofs_onlinepage_fixup(page, index, true);
798 /* bump up the number of spiltted parts of a page */
800 /* also update nr_pages */
801 work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
803 /* can be used for verification */
804 map->m_llen = offset + cur - map->m_la;
811 /* FIXME! avoid the last relundant fixup & endio */
812 z_erofs_onlinepage_endio(page);
814 debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
815 __func__, page, spiltted, map->m_llen);
818 /* if some error occurred while processing this page */
824 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
826 tagptr1_t t = tagptr_init(tagptr1_t, ptr);
827 struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
828 bool background = tagptr_unfold_tags(t);
833 spin_lock_irqsave(&io->u.wait.lock, flags);
834 if (!atomic_add_return(bios, &io->pending_bios))
835 wake_up_locked(&io->u.wait);
836 spin_unlock_irqrestore(&io->u.wait.lock, flags);
840 if (!atomic_add_return(bios, &io->pending_bios))
841 queue_work(z_erofs_workqueue, &io->u.work);
/*
 * bio completion handler for compressed reads: mark managed-cache pages
 * uptodate (or error) and kick off one unit of decompression accounting.
 * NOTE(review): extracted listing with gaps (error/unlock paths missing,
 * kept verbatim); comments only added.
 */
844 static inline void z_erofs_vle_read_endio(struct bio *bio)
846 const blk_status_t err = bio->bi_status;
848 struct bio_vec *bvec;
849 #ifdef EROFS_FS_HAS_MANAGED_CACHE
850 struct address_space *mc = NULL;
853 bio_for_each_segment_all(bvec, bio, i) {
854 struct page *page = bvec->bv_page;
855 bool cachemngd = false;
857 DBG_BUGON(PageUptodate(page));
858 DBG_BUGON(!page->mapping);
860 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/* resolve the managed-cache mapping lazily from the first real page */
861 if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
862 struct inode *const inode = page->mapping->host;
863 struct super_block *const sb = inode->i_sb;
865 mc = MNGD_MAPPING(EROFS_SB(sb));
869 * If mc has not gotten, it equals NULL,
870 * however, page->mapping never be NULL if working properly.
872 cachemngd = (page->mapping == mc);
878 SetPageUptodate(page);
884 z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
888 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
889 static DEFINE_MUTEX(z_pagemap_global_lock);
/*
 * Decompress one workgroup: gather its decompressed page array (onstack,
 * global pagemap, or kvmalloc'ed), collect file pages from the inline
 * pagevec and reusable compressed pages, run the plain-copy or LZ4 fast
 * path (falling back to the vmap path), then recycle staging/compressed
 * pages, end online pages, and reset the workgroup for reuse.
 * NOTE(review): extracted listing with substantial gaps (labels, branch
 * bodies and braces missing, kept verbatim); comments only added.
 */
891 static int z_erofs_vle_unzip(struct super_block *sb,
892 struct z_erofs_vle_workgroup *grp,
893 struct list_head *page_pool)
895 struct erofs_sb_info *const sbi = EROFS_SB(sb);
896 const unsigned int clusterpages = erofs_clusterpages(sbi);
898 struct z_erofs_pagevec_ctor ctor;
899 unsigned int nr_pages;
900 unsigned int sparsemem_pages = 0;
901 struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
902 struct page **pages, **compressed_pages, *page;
903 unsigned int i, llen;
905 enum z_erofs_page_type page_type;
907 struct z_erofs_vle_work *work;
912 work = z_erofs_vle_grab_primary_work(grp);
913 DBG_BUGON(!READ_ONCE(work->nr_pages));
915 mutex_lock(&work->lock);
916 nr_pages = work->nr_pages;
/* pick the cheapest storage that can hold nr_pages pointers */
918 if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
919 pages = pages_onstack;
920 else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
921 mutex_trylock(&z_pagemap_global_lock))
922 pages = z_pagemap_global;
925 pages = kvmalloc_array(nr_pages,
926 sizeof(struct page *), GFP_KERNEL);
928 /* fallback to global pagemap for the lowmem scenario */
929 if (unlikely(!pages)) {
930 if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
933 mutex_lock(&z_pagemap_global_lock);
934 pages = z_pagemap_global;
939 for (i = 0; i < nr_pages; ++i)
942 z_erofs_pagevec_ctor_init(&ctor,
943 Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
945 for (i = 0; i < work->vcnt; ++i) {
948 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
950 /* all pages in pagevec ought to be valid */
952 DBG_BUGON(!page->mapping);
954 if (z_erofs_gather_if_stagingpage(page_pool, page))
957 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
960 pagenr = z_erofs_onlinepage_index(page);
962 DBG_BUGON(pagenr >= nr_pages);
963 DBG_BUGON(pages[pagenr]);
965 pages[pagenr] = page;
969 z_erofs_pagevec_ctor_exit(&ctor, true);
972 compressed_pages = grp->compressed_pages;
974 for (i = 0; i < clusterpages; ++i) {
977 page = compressed_pages[i];
979 /* all compressed pages ought to be valid */
981 DBG_BUGON(!page->mapping);
983 if (z_erofs_is_stagingpage(page))
985 #ifdef EROFS_FS_HAS_MANAGED_CACHE
986 if (page->mapping == MNGD_MAPPING(sbi)) {
987 DBG_BUGON(!PageUptodate(page));
992 /* only non-head page could be reused as a compressed page */
993 pagenr = z_erofs_onlinepage_index(page);
995 DBG_BUGON(pagenr >= nr_pages);
996 DBG_BUGON(pages[pagenr]);
998 pages[pagenr] = page;
1003 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1005 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
1006 err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
1007 pages, nr_pages, work->pageofs);
1011 if (llen > grp->llen)
1014 err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
1015 pages, llen, work->pageofs);
1016 if (err != -ENOTSUPP)
/* fast path unsupported: fall through to the vmap-based decompressor */
1019 if (sparsemem_pages >= nr_pages)
1020 goto skip_allocpage;
1022 for (i = 0; i < nr_pages; ++i) {
1026 pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
1030 vout = erofs_vmap(pages, nr_pages);
1032 err = z_erofs_vle_unzip_vmap(compressed_pages,
1033 clusterpages, vout, llen, work->pageofs, overlapped);
1035 erofs_vunmap(vout, nr_pages);
1038 /* must handle all compressed pages before endding pages */
1039 for (i = 0; i < clusterpages; ++i) {
1040 page = compressed_pages[i];
1042 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1043 if (page->mapping == MNGD_MAPPING(sbi))
1046 /* recycle all individual staging pages */
1047 (void)z_erofs_gather_if_stagingpage(page_pool, page);
1049 WRITE_ONCE(compressed_pages[i], NULL);
1052 for (i = 0; i < nr_pages; ++i) {
1057 DBG_BUGON(!page->mapping);
1059 /* recycle all individual staging pages */
1060 if (z_erofs_gather_if_stagingpage(page_pool, page))
1063 if (unlikely(err < 0))
1066 z_erofs_onlinepage_endio(page);
1069 if (pages == z_pagemap_global)
1070 mutex_unlock(&z_pagemap_global_lock);
1071 else if (unlikely(pages != pages_onstack))
1077 /* all work locks MUST be taken before the following line */
1079 WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1081 /* all work locks SHOULD be released right now */
1082 mutex_unlock(&work->lock);
1084 z_erofs_vle_work_release(work);
1088 static void z_erofs_vle_unzip_all(struct super_block *sb,
1089 struct z_erofs_vle_unzip_io *io,
1090 struct list_head *page_pool)
1092 z_erofs_vle_owned_workgrp_t owned = io->head;
1094 while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1095 struct z_erofs_vle_workgroup *grp;
1097 /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
1098 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1100 /* no possible that 'owned' equals NULL */
1101 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1103 grp = container_of(owned, struct z_erofs_vle_workgroup, next);
1104 owned = READ_ONCE(grp->next);
1106 z_erofs_vle_unzip(sb, grp, page_pool);
1110 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1112 struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1113 struct z_erofs_vle_unzip_io_sb, io.u.work);
1114 LIST_HEAD(page_pool);
1116 DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1117 z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1119 put_pages_list(&page_pool);
/*
 * Pick (or allocate) the compressed page for slot @nr of @grp prior to bio
 * submission: reuse a valid managed-cache or in-place file page, revalidate
 * "justfound" cache pages, or allocate a fresh staging/cache page and race
 * it into compressed_pages[] with cmpxchg.  Returns the page to submit, or
 * a cached up-to-date page is skipped (no io needed).
 * NOTE(review): extracted listing with gaps (repeat/out labels, lock/unlock
 * and put_page paths missing, kept verbatim); comments only added.
 */
1123 static struct page *
1124 pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
1126 struct list_head *pagepool,
1127 struct address_space *mc,
1130 /* determined at compile time to avoid too many #ifdefs */
1131 const bool nocache = __builtin_constant_p(mc) ? !mc : false;
1132 const pgoff_t index = grp->obj.index;
1133 bool tocache = false;
1135 struct address_space *mapping;
1136 struct page *oldpage, *page;
1138 compressed_page_t t;
1142 page = READ_ONCE(grp->compressed_pages[nr]);
1149 * the cached page has not been allocated and
1150 * an placeholder is out there, prepare it now.
1152 if (!nocache && page == PAGE_UNALLOCATED) {
1157 /* process the target tagged pointer */
1158 t = tagptr_init(compressed_page_t, page);
1159 justfound = tagptr_unfold_tags(t);
1160 page = tagptr_unfold_ptr(t);
1162 mapping = READ_ONCE(page->mapping);
1165 * if managed cache is disabled, it's no way to
1166 * get such a cached-like page.
1169 /* if managed cache is disabled, it is impossible `justfound' */
1170 DBG_BUGON(justfound);
1172 /* and it should be locked, not uptodate, and not truncated */
1173 DBG_BUGON(!PageLocked(page));
1174 DBG_BUGON(PageUptodate(page));
1175 DBG_BUGON(!mapping);
1180 * unmanaged (file) pages are all locked solidly,
1181 * therefore it is impossible for `mapping' to be NULL.
1183 if (mapping && mapping != mc)
1184 /* ought to be unmanaged pages */
1189 /* only true if page reclaim goes wrong, should never happen */
1190 DBG_BUGON(justfound && PagePrivate(page));
1192 /* the page is still in manage cache */
1193 if (page->mapping == mc) {
1194 WRITE_ONCE(grp->compressed_pages[nr], page);
1196 if (!PagePrivate(page)) {
1198 * impossible to be !PagePrivate(page) for
1199 * the current restriction as well if
1200 * the page is already in compressed_pages[].
1202 DBG_BUGON(!justfound);
1205 set_page_private(page, (unsigned long)grp);
1206 SetPagePrivate(page);
1209 /* no need to submit io if it is already up-to-date */
1210 if (PageUptodate(page)) {
1218 * the managed page has been truncated, it's unsafe to
1219 * reuse this one, let's allocate a new cache-managed page.
1221 DBG_BUGON(page->mapping);
1222 DBG_BUGON(!justfound);
1228 page = __stagingpage_alloc(pagepool, gfp);
/* lost the slot race: recycle our page and retry with the winner's */
1229 if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
1230 list_add(&page->lru, pagepool);
1234 if (nocache || !tocache)
1236 if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1237 page->mapping = Z_EROFS_MAPPING_STAGING;
1241 set_page_private(page, (unsigned long)grp);
1242 SetPagePrivate(page);
1243 out: /* the only exit (for tracing and debugging) */
1247 static struct z_erofs_vle_unzip_io *
1248 jobqueue_init(struct super_block *sb,
1249 struct z_erofs_vle_unzip_io *io,
1252 struct z_erofs_vle_unzip_io_sb *iosb;
1255 /* waitqueue available for foreground io */
1258 init_waitqueue_head(&io->u.wait);
1259 atomic_set(&io->pending_bios, 0);
1263 iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
1264 GFP_KERNEL | __GFP_NOFAIL);
1267 /* initialize fields in the allocated descriptor */
1270 INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1272 io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1276 /* define workgroup jobqueue types */
1278 #ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
 * Initialize the submit (and, with managed cache, bypass) jobqueues and
 * return the bi_private tagged pointer (tag = background) handed to bios.
 * NOTE(review): extracted listing with gaps (enum values and #endif glue
 * missing, kept verbatim); comments only added.
 */
1285 static void *jobqueueset_init(struct super_block *sb,
1286 z_erofs_vle_owned_workgrp_t qtail[],
1287 struct z_erofs_vle_unzip_io *q[],
1288 struct z_erofs_vle_unzip_io *fgq,
1291 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1293 * if managed cache is enabled, bypass jobqueue is needed,
1294 * no need to read from device for all workgroups in this queue.
1296 q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
1297 qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1300 q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
1301 qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1303 return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
1306 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1307 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1308 z_erofs_vle_owned_workgrp_t qtail[],
1309 z_erofs_vle_owned_workgrp_t owned_head)
1311 z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
1312 z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
1314 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1315 if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
1316 owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1318 WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1320 WRITE_ONCE(*submit_qtail, owned_head);
1321 WRITE_ONCE(*bypass_qtail, &grp->next);
1323 qtail[JQ_BYPASS] = &grp->next;
1326 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1327 unsigned int nr_bios,
1331 * although background is preferred, no one is pending for submission.
1332 * don't issue workqueue for decompression but drop it directly instead.
1334 if (force_fg || nr_bios)
1337 kvfree(container_of(q[JQ_SUBMIT],
1338 struct z_erofs_vle_unzip_io_sb,
/*
 * Non-managed-cache stubs: without a managed cache every compressed page
 * requires device I/O, so nothing can be bypassed and nr_bios must be > 0.
 * NOTE(review): extracted listing with gaps (bodies/braces missing, kept
 * verbatim); comments only added.
 */
1343 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1344 z_erofs_vle_owned_workgrp_t qtail[],
1345 z_erofs_vle_owned_workgrp_t owned_head)
1347 /* impossible to bypass submission for managed cache disabled */
1351 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1352 unsigned int nr_bios,
1355 /* bios should be >0 if managed cache is disabled */
1356 DBG_BUGON(!nr_bios);
/*
 * Walk the owned workgroup chain, pick up every compressed page, batch
 * contiguous pages into read bios (splitting on discontinuity), and move
 * fully-cached workgroups to the bypass jobqueue.  Returns whether any
 * submission-side processing remains; kicks off decompression accounting.
 * NOTE(review): extracted listing with substantial gaps (bio/page decls,
 * labels and braces missing, kept verbatim); comments only added.
 */
1361 static bool z_erofs_vle_submit_all(struct super_block *sb,
1362 z_erofs_vle_owned_workgrp_t owned_head,
1363 struct list_head *pagepool,
1364 struct z_erofs_vle_unzip_io *fgq,
1367 struct erofs_sb_info *const sbi = EROFS_SB(sb);
1368 const unsigned int clusterpages = erofs_clusterpages(sbi);
1369 const gfp_t gfp = GFP_NOFS;
1371 z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
1372 struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
1375 /* since bio will be NULL, no need to initialize last_index */
1376 pgoff_t uninitialized_var(last_index);
1377 bool force_submit = false;
1378 unsigned int nr_bios;
1380 if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1383 force_submit = false;
1386 bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1388 /* by default, all need io submission */
1389 q[JQ_SUBMIT]->head = owned_head;
1392 struct z_erofs_vle_workgroup *grp;
1393 pgoff_t first_index;
1395 unsigned int i = 0, bypass = 0;
1398 /* no possible 'owned_head' equals the following */
1399 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1400 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1402 grp = container_of(owned_head,
1403 struct z_erofs_vle_workgroup, next);
1405 /* close the main owned chain at first */
1406 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1407 Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1409 first_index = grp->obj.index;
/* discontiguous with the previous cluster forces a new bio */
1410 force_submit |= (first_index != last_index + 1);
1413 page = pickup_page_for_submission(grp, i, pagepool,
1414 MNGD_MAPPING(sbi), gfp);
1416 force_submit = true;
1421 if (bio && force_submit) {
1423 __submit_bio(bio, REQ_OP_READ, 0);
1428 bio = erofs_grab_bio(sb, first_index + i,
1430 z_erofs_vle_read_endio, true);
1431 bio->bi_private = bi_private;
1436 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1437 if (err < PAGE_SIZE)
1438 goto submit_bio_retry;
1440 force_submit = false;
1441 last_index = first_index + i;
1443 if (++i < clusterpages)
1446 if (bypass < clusterpages)
1447 qtail[JQ_SUBMIT] = &grp->next;
1449 move_to_bypass_jobqueue(grp, qtail, owned_head);
1450 } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1453 __submit_bio(bio, REQ_OP_READ, 0);
1455 if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
1458 z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
1462 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1463 struct list_head *pagepool,
1466 struct super_block *sb = f->inode->i_sb;
1467 struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
1469 if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1472 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1473 z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
1478 /* wait until all bios are completed */
1479 wait_event(io[JQ_SUBMIT].u.wait,
1480 !atomic_read(&io[JQ_SUBMIT].pending_bios));
1482 /* let's synchronous decompression */
1483 z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
/*
 * ->readpage() implementation for VLE-compressed inodes: read the
 * cluster covering @page and decompress it synchronously.
 *
 * NOTE(review): the `struct page *page` parameter line, braces, the
 * error-handling structure around errln() and the final return are
 * elided in this listing.
 */
static int z_erofs_vle_normalaccess_readpage(struct file *file,
	struct inode *const inode = page->mapping->host;
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);

	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	/* byte offset of @page within the file */
	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page, &pagepool);
	/* finish the current work whether or not an error occurred */
	(void)z_erofs_vle_work_iter_end(&f.builder);

		errln("%s, failed to read, err [%d]", __func__, err);

	/* true: decompress in the foreground (synchronously) */
	z_erofs_submit_and_unzip(&f, &pagepool, true);

	put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
/*
 * ->readpages() implementation: batch-read a list of pages, choosing
 * between synchronous and asynchronous decompression based on batch
 * size and readahead state.
 *
 * NOTE(review): several structural lines (braces, the continue/while
 * parts of the two loops and the final return) are elided in this
 * listing.
 */
static int z_erofs_vle_normalaccess_readpages(struct file *filp,
					      struct address_space *mapping,
					      struct list_head *pages,
					      unsigned int nr_pages)
	struct inode *const inode = mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	/* policy: decide whether this batch should decompress synchronously */
	bool sync = __should_decompress_synchronously(sbi, nr_pages);
	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	/* singly-linked stack of accepted pages, chained via page_private */
	struct page *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(mapping->host, lru_to_page(pages),

	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;

	for (; nr_pages; --nr_pages) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/*
		 * A pure asynchronous readahead is indicated if a
		 * PG_readahead-marked page is hit first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		/* pages rejected by the pagecache go back to the free pool */
		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			list_add(&page->lru, &pagepool);

		/* push the accepted page onto the private-chained stack */
		set_page_private(page, (unsigned long)head);

		struct page *page = head;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
			struct erofs_vnode *vi = EROFS_V(inode);

			errln("%s, readahead error at page %lu of nid %llu",
			      __func__, page->index, vi->nid);

	(void)z_erofs_vle_work_iter_end(&f.builder);

	z_erofs_submit_and_unzip(&f, &pagepool, sync);

	put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
/* address_space operations used by VLE-compressed regular inodes */
const struct address_space_operations z_erofs_vle_normalaccess_aops = {
	.readpage = z_erofs_vle_normalaccess_readpage,
	.readpages = z_erofs_vle_normalaccess_readpages,
};
/*
 * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
 *
 * VLE compression mode attempts to compress a variable amount of logical
 * data into a physical cluster of fixed size.
 * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
 */
/* extract a @bits-wide field starting at @bit from the le16 advise word */
#define __vle_cluster_advise(x, bit, bits) \
	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))

/* the cluster type is encoded inside the di_advise field */
#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
	Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)

/* cluster type of one decompressed index entry */
#define vle_cluster_type(di)	\
	__vle_cluster_type((di)->di_advise)
1609 vle_decompressed_index_clusterofs(unsigned int *clusterofs,
1610 unsigned int clustersize,
1611 struct z_erofs_vle_decompressed_index *di)
1613 switch (vle_cluster_type(di)) {
1614 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
1615 *clusterofs = clustersize;
1617 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1618 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1619 *clusterofs = le16_to_cpu(di->di_clusterofs);
1628 static inline erofs_blk_t
1629 vle_extent_blkaddr(struct inode *inode, pgoff_t index)
1631 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1632 struct erofs_vnode *vi = EROFS_V(inode);
1634 unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1635 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1636 index * sizeof(struct z_erofs_vle_decompressed_index);
1638 return erofs_blknr(iloc(sbi, vi->nid) + ofs);
1641 static inline unsigned int
1642 vle_extent_blkoff(struct inode *inode, pgoff_t index)
1644 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
1645 struct erofs_vnode *vi = EROFS_V(inode);
1647 unsigned int ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
1648 vi->xattr_isize) + sizeof(struct erofs_extent_header) +
1649 index * sizeof(struct z_erofs_vle_decompressed_index);
1651 return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
/*
 * Read-mostly context shared by the extent-metadata walk in
 * z_erofs_map_blocks_iter() and vle_get_logical_extent_head().
 *
 * NOTE(review): the listing omitted the kaddr_ret member, which
 * vle_get_logical_extent_head() dereferences (*ctx->kaddr_ret), and the
 * closing brace; both are restored here.
 */
struct vle_map_blocks_iter_ctx {
	struct inode *inode;		/* inode being mapped */
	struct super_block *sb;		/* its superblock */
	unsigned int clusterbits;	/* log2 of the fixed cluster size */

	struct page **mpage_ret;	/* in/out: current extent metapage */
	void **kaddr_ret;		/* in/out: kmapped address of *mpage_ret */
};
/*
 * Resolve the HEAD (or PLAIN) index describing the extent that covers
 * logical cluster @lcn, following NONHEAD backward deltas recursively,
 * and report the extent start offset, physical block and mapping flags.
 *
 * NOTE(review): the return-type line, the `erofs_blk_t *pblk` parameter
 * line, braces, break/fallthrough markers and the error returns are
 * elided in this listing; the comments describe the visible logic only.
 */
vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
			    unsigned int lcn, /* logical cluster number */
			    unsigned long long *ofs,
			    unsigned int *flags)
	const unsigned int clustersize = 1 << ctx->clusterbits;
	const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
	struct page *mpage = *ctx->mpage_ret;	/* extent metapage */

	struct z_erofs_vle_decompressed_index *di;
	unsigned int cluster_type, delta0;

	/* switch to the metapage holding this lcn's index if necessary */
	if (mpage->index != mblk) {
		kunmap_atomic(*ctx->kaddr_ret);

		mpage = erofs_get_meta_page(ctx->sb, mblk, false);
		if (IS_ERR(mpage)) {
			*ctx->mpage_ret = NULL;
			return PTR_ERR(mpage);

		*ctx->mpage_ret = mpage;
		*ctx->kaddr_ret = kmap_atomic(mpage);

	di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);

	cluster_type = vle_cluster_type(di);
	switch (cluster_type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		delta0 = le16_to_cpu(di->di_u.delta[0]);
		/* the backward delta must be non-zero and not pass lcn 0 */
		if (unlikely(!delta0 || delta0 > lcn)) {
			errln("invalid NONHEAD dl0 %u at lcn %u of nid %llu",
			      delta0, lcn, EROFS_V(ctx->inode)->nid);

		/* recurse into the head cluster delta0 clusters back */
		return vle_get_logical_extent_head(ctx,
			lcn - delta0, ofs, pblk, flags);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		/* PLAIN is stored uncompressed: flip EROFS_MAP_ZIPPED off,
		 * then share the HEAD offset calculation below */
		*flags ^= EROFS_MAP_ZIPPED;
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		/* clustersize should be a power of two */
		*ofs = ((u64)lcn << ctx->clusterbits) +
		       (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
		*pblk = le32_to_cpu(di->di_u.blkaddr);

		errln("unknown cluster type %u at lcn %u of nid %llu",
		      cluster_type, lcn, EROFS_V(ctx->inode)->nid);
1723 int z_erofs_map_blocks_iter(struct inode *inode,
1724 struct erofs_map_blocks *map,
1728 const struct vle_map_blocks_iter_ctx ctx = {
1731 .clusterbits = EROFS_I_SB(inode)->clusterbits,
1732 .mpage_ret = &map->mpage,
1735 const unsigned int clustersize = 1 << ctx.clusterbits;
1736 /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
1737 const bool initial = !map->m_llen;
/* logical extent (start, end) offset */
1740 unsigned long long ofs, end;
1744 /* initialize `pblk' to keep gcc from printing foolish warnings */
1745 erofs_blk_t mblk, pblk = 0;
1746 struct page *mpage = map->mpage;
1747 struct z_erofs_vle_decompressed_index *di;
1748 unsigned int cluster_type, logical_cluster_ofs;
1751 trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
1753 /* when trying to read beyond EOF, leave it unmapped */
1754 if (unlikely(map->m_la >= inode->i_size)) {
1755 DBG_BUGON(!initial);
1756 map->m_llen = map->m_la + 1 - inode->i_size;
1757 map->m_la = inode->i_size;
1762 debugln("%s, m_la %llu m_llen %llu --- start", __func__,
1763 map->m_la, map->m_llen);
1765 ofs = map->m_la + map->m_llen;
1767 /* clustersize should be power of two */
1768 lcn = ofs >> ctx.clusterbits;
1769 ofs_rem = ofs & (clustersize - 1);
1771 mblk = vle_extent_blkaddr(inode, lcn);
1773 if (!mpage || mpage->index != mblk) {
1777 mpage = erofs_get_meta_page(ctx.sb, mblk, false);
1778 if (IS_ERR(mpage)) {
1779 err = PTR_ERR(mpage);
1785 DBG_BUGON(!PageUptodate(mpage));
1788 kaddr = kmap_atomic(mpage);
1789 di = kaddr + vle_extent_blkoff(inode, lcn);
1791 debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
1792 mblk, vle_extent_blkoff(inode, lcn));
1794 err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
1800 /* [walking mode] 'map' has been already initialized */
1801 map->m_llen += logical_cluster_ofs;
1805 /* by default, compressed */
1806 map->m_flags |= EROFS_MAP_ZIPPED;
1808 end = ((u64)lcn + 1) * clustersize;
1810 cluster_type = vle_cluster_type(di);
1812 switch (cluster_type) {
1813 case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
1814 if (ofs_rem >= logical_cluster_ofs)
1815 map->m_flags ^= EROFS_MAP_ZIPPED;
1817 case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
1818 if (ofs_rem == logical_cluster_ofs) {
1819 pblk = le32_to_cpu(di->di_u.blkaddr);
1823 if (ofs_rem > logical_cluster_ofs) {
1824 ofs = (u64)lcn * clustersize | logical_cluster_ofs;
1825 pblk = le32_to_cpu(di->di_u.blkaddr);
1829 /* logical cluster number should be >= 1 */
1830 if (unlikely(!lcn)) {
1831 errln("invalid logical cluster 0 at nid %llu",
1832 EROFS_V(inode)->nid);
1836 end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
1838 case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the corresponding first chunk */
1840 err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
1841 &pblk, &map->m_flags);
1844 if (unlikely(err)) {
1851 errln("unknown cluster type %u at offset %llu of nid %llu",
1852 cluster_type, ofs, EROFS_V(inode)->nid);
1859 map->m_llen = end - ofs;
1860 map->m_plen = clustersize;
1861 map->m_pa = blknr_to_addr(pblk);
1862 map->m_flags |= EROFS_MAP_MAPPED;
1864 kunmap_atomic(kaddr);
1867 debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
1868 __func__, map->m_la, map->m_pa,
1869 map->m_llen, map->m_plen, map->m_flags);
1871 trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
1873 /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
1874 DBG_BUGON(err < 0 && err != -ENOMEM);