/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/internal.h
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...)	pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...)	pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...)	pr_debug(x "\n", ##__VA_ARGS__)

#define dbg_might_sleep		might_sleep
#define DBG_BUGON(x)		BUG_ON(x)
#else
#define debugln(x, ...)		((void)0)

#define dbg_might_sleep()	((void)0)
#define DBG_BUGON(x)		((void)(x))
#endif

#ifdef CONFIG_EROFS_FAULT_INJECTION
enum {
	FAULT_KMALLOC,
	FAULT_MAX,
};

extern char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};
#endif

#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
#define EROFS_FS_ZIP_CACHE_LVL	(2)
#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
#define EROFS_FS_ZIP_CACHE_LVL	(1)
#else
#define EROFS_FS_ZIP_CACHE_LVL	(0)
#endif

#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
#define EROFS_FS_HAS_MANAGED_CACHE
#endif

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;

struct erofs_sb_info {
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* start block address of the metadata area */
	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	/* start block address of shared xattrs */
	u32 xattr_blkaddr;
#endif

	/* inode slot unit size in bit shift */
	unsigned char islotbits;
#ifdef CONFIG_EROFS_FS_ZIP
	/* cluster size in bit shift */
	unsigned char clusterbits;

	/* the dedicated workstation for compression */
	struct radix_tree_root workstn_tree;

	/* threshold for synchronous decompression */
	unsigned int max_sync_decompress_pages;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct inode *managed_cache;
#endif
#endif

	/* what we really care about is nid, rather than ino.. */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];			/* 128-bit uuid for volume */
	u8 volume_name[16];		/* volume name */

	unsigned int mount_opt;
	unsigned int shrinker_run_no;

#ifdef CONFIG_EROFS_FAULT_INJECTION
	struct erofs_fault_info fault_info;	/* For fault injection */
#endif
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type)					\
	infoln("inject %s in %s of %pS", erofs_fault_name[type],	\
	       __func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (!ffi->inject_rate)
		return false;
	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	return false;
}

static inline void erofs_show_injection_info(int type)
{
}
#endif

static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
				  size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		erofs_show_injection_info(FAULT_KMALLOC);
		return NULL;
	}
	return kmalloc(size, flags);
}

#define EROFS_SB(sb)		((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode)	((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_FAULT_INJECTION	0x00000040

#define clear_opt(sbi, option)	((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt & EROFS_MOUNT_##option)
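
/*
 * These helpers paste the option name onto the EROFS_MOUNT_ prefix:
 * set_opt(sbi, XATTR_USER), for instance, expands to
 * (sbi)->mount_opt |= EROFS_MOUNT_XATTR_USER. Note that test_opt()
 * evaluates to the raw masked bit, not a normalized boolean.
 */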

#ifdef CONFIG_EROFS_FS_ZIP
#define erofs_workstn_lock(sbi)		xa_lock(&(sbi)->workstn_tree)
#define erofs_workstn_unlock(sbi)	xa_unlock(&(sbi)->workstn_tree)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroups are never frozen on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif

static inline int erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);

	if (unlikely(o <= 0))
		return -1;

	if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
		goto repeat;

	*ocnt = o;
	return 0;
}

#define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)
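
/*
 * Reference protocol in short: erofs_workgroup_get() waits until the
 * refcount is no longer EROFS_LOCKED_MAGIC and then bumps it with cmpxchg,
 * so a frozen workgroup cannot gain new users. The raw
 * __erofs_workgroup_get()/__erofs_workgroup_put() wrappers skip that check
 * and are meant for callers that already hold a reference or the freeze.
 */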

extern int erofs_workgroup_put(struct erofs_workgroup *grp);

extern struct erofs_workgroup *erofs_find_workgroup(
	struct super_block *sb, pgoff_t index, bool *tag);

extern int erofs_register_workgroup(struct super_block *sb,
	struct erofs_workgroup *grp, bool tag);

extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
	unsigned long nr_shrink, bool cleanup);

static inline void erofs_workstation_cleanup_all(struct super_block *sb)
{
	erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
					      struct erofs_workgroup *egrp);
extern int erofs_try_to_free_cached_page(struct address_space *mapping,
					 struct page *page);

#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
#else
#define MNGD_MAPPING(sbi)	(NULL)
#endif

#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES	3

static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
						      unsigned int nr)
{
	return nr <= sbi->max_sync_decompress_pages;
}
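
/*
 * Example: with max_sync_decompress_pages left at
 * DEFAULT_MAX_SYNC_DECOMPRESS_PAGES (3), a request covering at most 3 pages
 * is decompressed synchronously in the caller's context, while larger
 * requests are expected to take the asynchronous decompression path.
 */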

int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
#else
/* dummy initializer/finalizer for the decompression subsystem */
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE		PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define ROOT_NID(sb)		((sb)->root_nid)

#ifdef CONFIG_EROFS_FS_ZIP
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)

/* page count of a compressed cluster */
#define erofs_clusterpages(sbi)	((1 << (sbi)->clusterbits) / PAGE_SIZE)
#endif
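
/*
 * Example: with 4KiB pages, clusterbits == 12 gives erofs_clusterpages() == 1
 * (one page per compressed cluster) and clusterbits == 14 gives 4, which is
 * expected to stay within the Z_EROFS_CLUSTER_MAX_PAGES hard limit above.
 */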

typedef u64 erofs_off_t;

/* data type for filesystem-wide block numbers */
typedef u32 erofs_blk_t;

#define erofs_blknr(addr)	((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)	((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)	((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
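
/*
 * Worked example (illustrative values): an inode slot is (1 << islotbits)
 * bytes, so with islotbits == 5 (32-byte slots) and meta_blkaddr == 2,
 * nid 16 resolves to byte offset 2 * EROFS_BLKSIZ + 16 * 32 on the device.
 */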

#define inode_set_inited_xattr(inode)	(EROFS_V(inode)->flags |= 1)
#define inode_has_inited_xattr(inode)	(EROFS_V(inode)->flags & 1)

struct erofs_vnode {
	erofs_nid_t nid;
	unsigned int flags;

	unsigned char data_mapping_mode;
	/* inline size in bytes */
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned xattr_shared_count;
	unsigned *xattr_shared_xattrs;

	erofs_blk_t raw_blkaddr;

	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_V(ptr)	\
	container_of(ptr, struct erofs_vnode, vfs_inode)

#define __inode_advise(x, bit, bits) \
	(((x) >> (bit)) & ((1 << (bits)) - 1))

#define __inode_version(advise)	\
	__inode_advise(advise, EROFS_I_VERSION_BIT,	\
		EROFS_I_VERSION_BITS)

#define __inode_data_mapping(advise)	\
	__inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
		EROFS_I_DATA_MAPPING_BITS)
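
/*
 * __inode_advise() extracts "bits" bits starting at "bit" from the on-disk
 * advise field; __inode_version() and __inode_data_mapping() use it to pick
 * out the inode layout version and the data mapping mode (plain, inline or
 * compression, see the is_inode_layout_* helpers below).
 */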

static inline unsigned long inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline bool is_inode_layout_plain(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_PLAIN;
}

static inline bool is_inode_layout_compression(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode ==
					EROFS_INODE_LAYOUT_COMPRESSION;
}

static inline bool is_inode_layout_inline(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_INLINE;
}

extern const struct super_operations erofs_sops;
extern const struct inode_operations erofs_dir_iops;
extern const struct file_operations erofs_dir_fops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif

/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Unlike other file systems, it is used for two access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len (in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed on-disk block rather than the uncompressed
 * in-memory block for a compressed file.
 *
 * m_pofs equals m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, it behaves the same as the RAW access
 * mode. However, if the inode is compressed, users should pass a valid
 * (m_lblk, m_lofs) pair, and get the needed m_pblk, m_pofs, m_len to
 * fetch the compressed data and the updated m_lblk, m_lofs which
 * indicate the start of the corresponding uncompressed data in the file.
 */
enum {
	BH_Zipped = BH_PrivateStart,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED	(1 << BH_Zipped)

struct erofs_map_blocks {
	erofs_off_t m_pa, m_la;
	/* physical / logical extent length in bytes */
	u64 m_plen, m_llen;

	unsigned int m_flags;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW	0x0001
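
/*
 * Illustrative call sequence for the RAW mode described above, assuming an
 * "inode" and a logical byte offset "pos":
 *
 *	struct erofs_map_blocks map = { .m_la = pos };
 *	int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *
 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *		read the on-disk data starting at map.m_pa
 *
 * The real callers live in data.c and unzip_vle.c.
 */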

static inline struct bio *
erofs_grab_bio(struct super_block *sb,
	       erofs_blk_t blkaddr, unsigned int nr_pages,
	       bio_end_io_t endio, bool nofail)
{
	const gfp_t gfp = GFP_NOIO;
	struct bio *bio;

	do {
		if (nr_pages == 1) {
			bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
			if (unlikely(bio == NULL)) {
				DBG_BUGON(nofail);
				return ERR_PTR(-ENOMEM);
			}
			break;
		}
		bio = bio_alloc(gfp, nr_pages);
		/* retry with fewer pages if the allocation fails */
		nr_pages /= 2;
	} while (unlikely(bio == NULL));

	bio->bi_end_io = endio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	return bio;
}

static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
{
	bio_set_op_attrs(bio, op, op_flags);
	submit_bio(bio);
}
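
/*
 * Illustrative read submission (see data.c for the real users), where
 * "some_end_io" stands in for an actual completion handler:
 *
 *	bio = erofs_grab_bio(sb, blkaddr, 1, some_end_io, false);
 *	if (!IS_ERR(bio)) {
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *		__submit_bio(bio, REQ_OP_READ, 0);
 *	}
 */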

#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
#define EROFS_IO_MAX_RETRIES_NOFAIL	0
#else
#define EROFS_IO_MAX_RETRIES_NOFAIL	CONFIG_EROFS_FS_IO_MAX_RETRIES
#endif

extern struct page *__erofs_get_meta_page(struct super_block *sb,
	erofs_blk_t blkaddr, bool prio, bool nofail);

static inline struct page *erofs_get_meta_page(struct super_block *sb,
	erofs_blk_t blkaddr, bool prio)
{
	return __erofs_get_meta_page(sb, blkaddr, prio, false);
}

static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
	erofs_blk_t blkaddr, bool prio)
{
	return __erofs_get_meta_page(sb, blkaddr, prio, true);
}

extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
	struct page **, int);

struct erofs_map_blocks_iter {
	struct erofs_map_blocks map;
	struct page *mpage;
};

static inline struct page *
erofs_get_inline_page(struct inode *inode,
		      erofs_blk_t blkaddr)
{
	return erofs_get_meta_page(inode->i_sb,
		blkaddr, S_ISDIR(inode->i_mode));
}

static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}
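
/*
 * On 32-bit kernels the 64-bit nid is folded into an unsigned long by
 * XORing its two halves; on 64-bit kernels the nid itself is the hash,
 * so collisions are only a concern for 32-bit builds.
 */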

extern struct inode *erofs_iget(struct super_block *sb,
	erofs_nid_t nid, bool dir);

int erofs_namei(struct inode *dir, struct qstr *name,
	erofs_nid_t *nid, unsigned *d_type);

#ifdef CONFIG_EROFS_FS_XATTR
extern const struct xattr_handler *erofs_xattr_handlers[];

/* symlink and special inode */
extern const struct inode_operations erofs_symlink_xattr_iops;
extern const struct inode_operations erofs_fast_symlink_xattr_iops;
extern const struct inode_operations erofs_special_inode_operations;
#endif

static inline void set_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
	inode->i_op = &erofs_fast_symlink_xattr_iops;
#else
	inode->i_op = &simple_symlink_inode_operations;
#endif
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
	return inode->i_op == &erofs_fast_symlink_xattr_iops;
#else
	return inode->i_op == &simple_symlink_inode_operations;
#endif
}

static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
	int i = 0;

	while (1) {
		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		/* retry two more times (totally 3 times) */
		if (addr != NULL || ++i >= 3)
			return addr;
		vm_unmap_aliases();
	}
	return NULL;
#else
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
#endif
}

static inline void erofs_vunmap(const void *mem, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
	vm_unmap_ram(mem, count);
#else
	vunmap(mem);
#endif
}
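
/*
 * vm_map_ram() is generally cheaper than vmap() but its mapping space can be
 * exhausted, hence the bounded retry in erofs_vmap() above; erofs_vunmap()
 * must use the unmap primitive matching whichever path created the mapping.
 */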

extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

extern void erofs_register_super(struct super_block *sb);
extern void erofs_unregister_super(struct super_block *sb);

extern unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc);
extern unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc);

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

#endif	/* __EROFS_INTERNAL_H */