// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/staging/erofs/inode.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "xattr.h"

#include <trace/events/erofs.h>
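
/*
 * Decode one little-endian on-disk inode (compact v1 or extended v2
 * layout, selected by the version bits in i_advise) into the in-memory
 * erofs_vnode and generic VFS inode fields.
 */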
static int read_inode(struct inode *inode, void *data)
{
	struct erofs_vnode *vi = EROFS_V(inode);
	struct erofs_inode_v1 *v1 = data;
	const unsigned int advise = le16_to_cpu(v1->i_advise);
	erofs_blk_t nblks = 0;

	vi->datamode = __inode_data_mapping(advise);

	if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) {
		errln("unsupported data mapping %u of nid %llu",
		      vi->datamode, vi->nid);
		return -EOPNOTSUPP;
	}

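	/*
	 * Extended (v2) on-disk inodes carry 64-bit sizes, 32-bit uid/gid
	 * and a real change time; compact (v1) inodes fall back to the
	 * image build time below.
	 */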
	if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) {
		struct erofs_inode_v2 *v2 = data;

		vi->inode_isize = sizeof(struct erofs_inode_v2);
		vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);

		inode->i_mode = le16_to_cpu(v2->i_mode);
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    S_ISLNK(inode->i_mode))
			vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
		else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(v2->i_u.rdev));
		else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode))
			inode->i_rdev = 0;
		else
			goto bogusimode;

		i_uid_write(inode, le32_to_cpu(v2->i_uid));
		i_gid_write(inode, le32_to_cpu(v2->i_gid));
		set_nlink(inode, le32_to_cpu(v2->i_nlink));

		/* extended inodes store the (nanosecond) change time */
		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
			le64_to_cpu(v2->i_ctime);
		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
			le32_to_cpu(v2->i_ctime_nsec);

		inode->i_size = le64_to_cpu(v2->i_size);

		/* total blocks for compressed files */
		if (is_inode_layout_compression(inode))
			nblks = le32_to_cpu(v2->i_u.compressed_blocks);
	} else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		vi->inode_isize = sizeof(struct erofs_inode_v1);
		vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);

		inode->i_mode = le16_to_cpu(v1->i_mode);
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    S_ISLNK(inode->i_mode))
			vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
		else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(v1->i_u.rdev));
		else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode))
			inode->i_rdev = 0;
		else
			goto bogusimode;

		i_uid_write(inode, le16_to_cpu(v1->i_uid));
		i_gid_write(inode, le16_to_cpu(v1->i_gid));
		set_nlink(inode, le16_to_cpu(v1->i_nlink));

		/* use the image build time to derive all file times */
		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
			sbi->build_time;
		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
			sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(v1->i_size);
		if (is_inode_layout_compression(inode))
			nblks = le32_to_cpu(v1->i_u.compressed_blocks);
	} else {
		errln("unsupported on-disk inode version %u of nid %llu",
		      __inode_version(advise), vi->nid);
		return -EOPNOTSUPP;
	}

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems do */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return 0;

bogusimode:
	errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid);
	return -EFSCORRUPTED;
}

/*
 * try_lock can be required since the locking order is:
 *   file data (fs_inode)
 *        meta (bd_inode)
 * but the majority of callers is "iget"; in that case we are pretty
 * sure no deadlock can occur since no data operations exist. However,
 * try_lock is still preferred here since it adds little overhead and
 * succeeds immediately.
 */
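/*
 * Copy tail-packed inline data out of the metadata page; currently only
 * fast symlink targets are handled here.
 */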
static int fill_inline_data(struct inode *inode, void *data,
			    unsigned int m_pofs)
{
	struct erofs_vnode *vi = EROFS_V(inode);
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);

	/* should be the flat inline data layout */
	if (!is_inode_flat_inline(inode))
		return 0;

	/* fast symlink (following ext4) */
	if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
		char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);

		if (unlikely(!lnk))
			return -ENOMEM;

		m_pofs += vi->inode_isize + vi->xattr_isize;

		/* inline symlink data shouldn't cross a page boundary either */
		if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
			kfree(lnk);
			errln("inline data cross block boundary @ nid %llu",
			      vi->nid);
			return -EFSCORRUPTED;
		}

		/* get in-page inline data */
		memcpy(lnk, data + m_pofs, inode->i_size);
		lnk[inode->i_size] = '\0';

		inode->i_link = lnk;
		set_inode_fast_symlink(inode);
	}
	return 0;
}
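
/*
 * Read the on-disk inode from its metadata block and wire up the proper
 * inode/address_space operations according to the file type and layout.
 */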
static int fill_inode(struct inode *inode, int isdir)
{
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
	struct erofs_vnode *vi = EROFS_V(inode);
	struct page *page;
	void *data;
	int err;
	erofs_blk_t blkaddr;
	unsigned int ofs;
	erofs_off_t inode_loc;

	trace_erofs_fill_inode(inode, isdir);
	inode_loc = iloc(sbi, vi->nid);
	blkaddr = erofs_blknr(inode_loc);
	ofs = erofs_blkoff(inode_loc);

	debugln("%s, reading inode nid %llu at %u of blkaddr %u",
		__func__, vi->nid, ofs, blkaddr);

	page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir);

	if (IS_ERR(page)) {
		errln("failed to get inode (nid: %llu) page, err %ld",
		      vi->nid, PTR_ERR(page));
		return PTR_ERR(page);
	}

	DBG_BUGON(!PageUptodate(page));
	data = page_address(page);

	err = read_inode(inode, data + ofs);
	if (!err) {
		/* set up the new inode */
		if (S_ISREG(inode->i_mode)) {
			inode->i_op = &erofs_generic_iops;
			inode->i_fop = &generic_ro_fops;
		} else if (S_ISDIR(inode->i_mode)) {
			inode->i_op = &erofs_dir_iops;
			inode->i_fop = &erofs_dir_fops;
		} else if (S_ISLNK(inode->i_mode)) {
			/* by default, page_get_link is used for symlinks */
			inode->i_op = &erofs_symlink_iops;
			inode_nohighmem(inode);
		} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
			inode->i_op = &erofs_generic_iops;
			init_special_inode(inode, inode->i_mode, inode->i_rdev);
			goto out_unlock;
		} else {
			err = -EFSCORRUPTED;
			goto out_unlock;
		}

		if (is_inode_layout_compression(inode)) {
			err = z_erofs_fill_inode(inode);
			goto out_unlock;
		}

		inode->i_mapping->a_ops = &erofs_raw_access_aops;

		/* fill last page if inline data is available */
		err = fill_inline_data(inode, data, ofs);
	}

out_unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * erofs nid is 64 bits, but i_ino is 'unsigned long'; therefore
 * 32-bit platforms need these iget actors to find the right inode.
 */
#if BITS_PER_LONG == 32
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_V(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}
#endif
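
/* erofs_inode_hash() maps the 64-bit nid to an unsigned long cache key */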
static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

#if BITS_PER_LONG >= 64
	/* it is safe to use iget_locked on >= 64-bit platforms */
	return iget_locked(sb, hashval);
#else
	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
			    erofs_iget_set_actor, &nid);
#endif
}
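
/*
 * Look up (or instantiate) the VFS inode for @nid; freshly allocated
 * inodes are filled from disk via fill_inode() before being unlocked.
 */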
struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid, bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_vnode *vi = EROFS_V(inode);

		vi->nid = nid;
		err = fill_inode(inode, isdir);
		if (likely(!err))
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}
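
/*
 * erofs is a read-only filesystem: every inode is reported immutable,
 * and compressed inodes additionally carry STATX_ATTR_COMPRESSED.
 */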
int erofs_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (is_inode_layout_compression(inode))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
#ifdef CONFIG_EROFS_FS_XATTR
	.listxattr = erofs_listxattr,
#endif
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
#ifdef CONFIG_EROFS_FS_XATTR
	.listxattr = erofs_listxattr,
#endif
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
#ifdef CONFIG_EROFS_FS_XATTR
	.listxattr = erofs_listxattr,
#endif
	.get_acl = erofs_get_acl,
};