/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/drivers/staging/erofs/zdata.h
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H
15 #define Z_EROFS_NR_INLINE_PAGEVECS 3
18 * Structure fields follow one of the following exclusion rules.
20 * I: Modifiable by initialization/destruction paths and read-only
23 * L: Field should be protected by pageset lock;
25 * A: Field should be accessed / updated in atomic for parallelized code.
27 struct z_erofs_collection {
30 /* I: page offset of start position of decompression */
31 unsigned short pageofs;
33 /* L: maximum relative page index in pagevec[] */
34 unsigned short nr_pages;
36 /* L: total number of pages in pagevec[] */
40 /* L: inline a certain number of pagevecs for bootstrap */
41 erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
43 /* I: can be used to free the pcluster by RCU. */
/* decompressed-length flag/shift packed into z_erofs_pcluster.length */
#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;
57 struct z_erofs_pcluster {
58 struct erofs_workgroup obj;
59 struct z_erofs_collection primary_collection;
61 /* A: point to next chained pcluster or TAILs */
62 z_erofs_next_pcluster_t next;
64 /* A: compressed pages (including multi-usage pages) */
65 struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
67 /* A: lower limit of decompressed length and if full length or not */
70 /* I: compression algorithm format */
71 unsigned char algorithmformat;
72 /* I: bit shift of physical cluster size */
73 unsigned char clusterbits;
#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted io (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted io */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)

#define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)
89 struct z_erofs_unzip_io {
90 atomic_t pending_bios;
91 z_erofs_next_pcluster_t head;
94 wait_queue_head_t wait;
95 struct work_struct work;
99 struct z_erofs_unzip_io_sb {
100 struct z_erofs_unzip_io io;
101 struct super_block *sb;
104 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
105 static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
108 return page->mapping == MNGD_MAPPING(sbi);
111 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
112 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
113 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
116 * waiters (aka. ongoing_packs): # to unlock the page
117 * sub-index: 0 - for partial page, >= 1 full page sub-index
119 typedef atomic_t z_erofs_onlinepage_t;
122 union z_erofs_onlinepage_converter {
123 z_erofs_onlinepage_t *o;
127 static inline unsigned int z_erofs_onlinepage_index(struct page *page)
129 union z_erofs_onlinepage_converter u;
131 DBG_BUGON(!PagePrivate(page));
132 u.v = &page_private(page);
134 return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
137 static inline void z_erofs_onlinepage_init(struct page *page)
140 z_erofs_onlinepage_t o;
142 /* keep from being unlocked in advance */
143 } u = { .o = ATOMIC_INIT(1) };
145 set_page_private(page, u.v);
147 SetPagePrivate(page);
150 static inline void z_erofs_onlinepage_fixup(struct page *page,
151 uintptr_t index, bool down)
153 unsigned long *p, o, v, id;
155 p = &page_private(page);
158 id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
163 DBG_BUGON(id != index);
166 v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
167 ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
168 if (cmpxchg(p, o, v) != o)
172 static inline void z_erofs_onlinepage_endio(struct page *page)
174 union z_erofs_onlinepage_converter u;
177 DBG_BUGON(!PagePrivate(page));
178 u.v = &page_private(page);
180 v = atomic_dec_return(u.o);
181 if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
182 ClearPagePrivate(page);
183 if (!PageError(page))
184 SetPageUptodate(page);
187 debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
/* bounded number of page pointers kept on-stack vs. in a global buffer */
#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048