1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
7 #include <linux/security.h>
11 struct super_block *sb;
/*
 * Finish using the iterator's currently mapped page: drop the kernel
 * mapping and unlock the page.
 * NOTE(review): lines are elided in this view (braces, presumably a
 * kunmap(it->page) branch and a final put_page()) — confirm against the
 * full source before relying on this flow.
 */
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
	/* the only user of kunmap() is 'init_inode_xattrs' */
	if (unlikely(!atomic))
	kunmap_atomic(it->kaddr);
	unlock_page(it->page);
/*
 * Terminal variant of xattr_iter_end(); always treats the mapping as
 * atomic.
 * NOTE(review): a guard before the call (likely "if (!it->page) return;")
 * is elided in this view — confirm against the full source.
 */
static inline void xattr_iter_end_final(struct xattr_iter *it)
	xattr_iter_end(it, true);
/*
 * Parse the inode's xattr ibody header and cache the shared xattr id
 * array in vi->xattr_shared_xattrs; runs at most once per inode.
 * Serialized by the EROFS_V_BL_XATTR_BIT bit-lock; success is recorded
 * with EROFS_V_EA_INITED_BIT.
 * NOTE(review): this view elides several lines (braces, returns, error
 * paths and the declarations of 'it', 'ret', 'i' and 'atomic_map').
 */
static int init_inode_xattrs(struct inode *inode)
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;

	/* the most case is that xattrs of this inode are initialized. */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))

	/* take the per-inode xattr init bit-lock; killable wait */
	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		/* NOTE(review): xattr_isize is unsigned; %u would be the
		 * exact specifier (%d is harmless for sane values). */
		errln("xattr_isize %d of nid %llu is not supported yet",
		      vi->xattr_isize, vi->nid);
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (unlikely(vi->xattr_isize)) {
			errln("bogus xattr ibody @ nid %llu", vi->nid);
			goto out_unlock;	/* xattr ondisk layout error */

	/* the ibody header sits right after the on-disk inode */
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_inline_page(inode, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	/* NOTE(review): sizeof(uint) silently assumes the id array element
	 * type; sizeof(*vi->xattr_shared_xattrs) would be self-documenting. */
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			/* advance to the next metadata block */
			it.page = erofs_get_meta_page(sb, ++it.blkaddr,
						      S_ISDIR(inode->i_mode));
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);

			it.kaddr = kmap_atomic(it.page);

		/* decode one shared xattr id (little-endian on disk) */
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	xattr_iter_end(&it, atomic_map);

	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);

	/* drop the bit-lock and wake any waiters */
	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
 * the general idea for these return values is
 * if 0 is returned, go on processing the current xattr;
 * 1 (> 0) is returned, skip this round to process the next xattr;
 * -err (< 0) is returned, an error (maybe ENOXATTR) occurred
 * and needs to be handled
/*
 * Callback table consumed by xattr_foreach(); see the return value
 * convention documented just above this struct.
 * NOTE(review): the trailing length parameter of ->name and ->value is
 * elided in this view.
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
/*
 * If 'ofs' has run past the current block, remap the iterator onto the
 * metadata block that actually contains 'ofs'.
 * NOTE(review): elided in this view: braces, the early "no fixup needed"
 * return, the body of the erofs_get_meta_page() error path, and the
 * final return.
 */
static inline int xattr_iter_fixup(struct xattr_iter *it)
	if (it->ofs < EROFS_BLKSIZ)

	/* release the stale page before switching blocks */
	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);

	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
/*
 * Position the iterator at the start of the inode's inline xattr area
 * and map it; returns the number of inline xattr bytes on success or a
 * negative errno.
 * NOTE(review): the second parameter line (presumably
 * "struct inode *inode") and several braces/returns are elided here.
 */
static int inline_xattr_iter_begin(struct xattr_iter *it,
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	/* no inline area at all (or corrupt xattr_isize) */
	if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_inline_page(inode, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
211 * Regardless of success or failure, `xattr_foreach' will end up with
212 * `ofs' pointing to the next xattr item rather than an arbitrary position.
/*
 * Walk exactly one xattr item — entry struct, then name, then value —
 * invoking the given handler ops; 'tlimit' (when non-NULL) tracks the
 * bytes remaining in the inline area.
 * NOTE(review): braces and several early-exit/cleanup lines are elided
 * in this view; comments below describe only the visible statements.
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);

	/*
	 * 1. read xattr entry to the memory,
	 *    since we do EROFS_XATTR_ALIGN
	 *    therefore entry should be in the page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);

	/* xattr on-disk corruption: xattr entry beyond xattr_isize */
	if (unlikely(*tlimit < entry_sz)) {
		return -EFSCORRUPTED;

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* ask the handler whether this entry is interesting at all */
	err = op->entry(it, &entry);
	/* on skip/error: advance ofs past the remainder of this item */
	it->ofs += entry.e_name_len + value_sz;

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      entry.e_name_len - processed);

		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		/* on mismatch: skip remaining name bytes plus the value */
		it->ofs += entry.e_name_len - processed + value_sz;

	/* 3. handle xattr value */

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);

	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
/*
 * State for erofs_getxattr(): the generic iterator plus the lookup key
 * (index + name) and the caller's output buffer bookkeeping.
 * NOTE(review): the 'buffer' and 'name' members are elided in this view.
 */
struct getxattr_iter {
	struct xattr_iter it;

	int buffer_size, index;
323 static int xattr_entrymatch(struct xattr_iter *_it,
324 struct erofs_xattr_entry *entry)
326 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
328 return (it->index != entry->e_name_index ||
329 it->name.len != entry->e_name_len) ? -ENOATTR : 0;
332 static int xattr_namematch(struct xattr_iter *_it,
333 unsigned int processed, char *buf, unsigned int len)
335 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
337 return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
340 static int xattr_checkbuffer(struct xattr_iter *_it,
341 unsigned int value_sz)
343 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
344 int err = it->buffer_size < value_sz ? -ERANGE : 0;
346 it->buffer_size = value_sz;
347 return !it->buffer ? 1 : err;
350 static void xattr_copyvalue(struct xattr_iter *_it,
351 unsigned int processed,
352 char *buf, unsigned int len)
354 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
356 memcpy(it->buffer + processed, buf, len);
/* xattr_foreach() ops implementing lookup-by-name for erofs_getxattr() */
static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
/*
 * Search the inline xattr area for it->name; returns the value size or
 * a negative errno (-ENOATTR when absent).
 * NOTE(review): the loop driving xattr_foreach() over 'remaining' and
 * the 'ret' declaration are partially elided in this view.
 */
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);

		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);

	xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
/*
 * Search the inode's shared xattrs (stored in the packed xattr meta
 * area) for it->name; returns the value size or a negative errno.
 * NOTE(review): braces, loop-exit conditions and the guard before the
 * in-loop xattr_iter_end() call are elided in this view.
 */
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		/* (re)map only when entering a different meta block */
		if (!i || blkaddr != it->it.blkaddr) {
			xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);

	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
422 static bool erofs_xattr_user_list(struct dentry *dentry)
424 return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
427 static bool erofs_xattr_trusted_list(struct dentry *dentry)
429 return capable(CAP_SYS_ADMIN);
/*
 * Core xattr lookup: ensure the inode's xattrs are initialized, then
 * search the inline area first and the shared xattrs after.
 * NOTE(review): elided here: the 'name' parameter line, error returns,
 * the fall-through condition to shared_getxattr() (presumably on
 * -ENOATTR) and the final return.
 */
int erofs_getxattr(struct inode *inode, int index,
		   void *buffer, size_t buffer_size)
	struct getxattr_iter it;

	ret = init_inode_xattrs(inode);

	it.name.len = strlen(name);
	/* names longer than the on-disk limit cannot exist */
	if (it.name.len > EROFS_NAME_LEN)

	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
		ret = shared_getxattr(inode, &it);
/*
 * Common ->get callback shared by the user/trusted/security handlers;
 * enforces each namespace's access policy before delegating to
 * erofs_getxattr() with handler->flags as the on-disk name index.
 * NOTE(review): the return statements inside the switch (presumably
 * -EOPNOTSUPP / -EPERM) and the default case are elided in this view.
 */
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
	case EROFS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
	case EROFS_XATTR_INDEX_SECURITY:

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
/* Handles "user.*" xattrs; listing gated on the XATTR_USER mount option */
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
/* Handles "trusted.*" xattrs; listing restricted to CAP_SYS_ADMIN */
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
#ifdef CONFIG_EROFS_FS_SECURITY
/* Handles "security.*" xattrs; no ->list hook, so always listed */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
/*
 * Handler table wired into sb->s_xattr.
 * NOTE(review): the #endif matching CONFIG_EROFS_FS_POSIX_ACL, the NULL
 * terminator and the closing brace are elided in this view.
 */
const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
/*
 * State for erofs_listxattr(): the generic iterator plus the target
 * dentry and output-buffer bookkeeping (buffer_ofs doubles as the
 * required-size accumulator when probing).
 * NOTE(review): the 'buffer' member is elided in this view.
 */
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;

	int buffer_size, buffer_ofs;
/*
 * ->entry op for listxattr: either account the prefixed name length
 * (probe mode, no buffer) or copy the namespace prefix into the buffer.
 * NOTE(review): braces, the 'prefix' declaration, the probe-mode
 * condition and several returns (including the buffer-overflow path)
 * are elided in this view.
 */
static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int prefix_len;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	/* skip namespaces this dentry must not list */
	if (!h || (h->list && !h->list(it->dentry)))

	prefix = xattr_prefix(h);
	prefix_len = strlen(prefix);

	/* probe mode: only account "prefix + name + NUL" */
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
561 static int xattr_namelist(struct xattr_iter *_it,
562 unsigned int processed, char *buf, unsigned int len)
564 struct listxattr_iter *it =
565 container_of(_it, struct listxattr_iter, it);
567 memcpy(it->buffer + it->buffer_ofs, buf, len);
568 it->buffer_ofs += len;
572 static int xattr_skipvalue(struct xattr_iter *_it,
573 unsigned int value_sz)
575 struct listxattr_iter *it =
576 container_of(_it, struct listxattr_iter, it);
578 it->buffer[it->buffer_ofs++] = '\0';
/*
 * xattr_foreach() ops for listxattr: names only, values skipped.
 * NOTE(review): the .value member line (presumably NULL) and the closing
 * brace are elided in this view.
 */
static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
/*
 * Enumerate the xattr names stored in the inline area; returns bytes
 * written/needed (it->buffer_ofs) or a negative errno.
 * NOTE(review): the loop around xattr_foreach() and the 'ret'
 * declaration are partially elided in this view.
 */
static int inline_listxattr(struct listxattr_iter *it)
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));

		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);

	xattr_iter_end_final(&it->it);
	return ret ? ret : it->buffer_ofs;
/*
 * Enumerate the names of this inode's shared xattrs; mirrors
 * shared_getxattr() but with the listing handler ops.
 * NOTE(review): braces, loop-exit conditions and the guard before the
 * in-loop xattr_iter_end() call are elided in this view.
 */
static int shared_listxattr(struct listxattr_iter *it)
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		/* (re)map only when entering a different meta block */
		if (!i || blkaddr != it->it.blkaddr) {
			xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);

	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_ofs;
/*
 * ->listxattr entry point: initialize xattrs if needed, then walk the
 * inline names followed by the shared ones; returns total bytes written
 * (or the size needed when 'buffer' is NULL).
 * NOTE(review): braces, early error returns and the declarations of
 * 'ret' and the buffer assignment are elided in this view.
 */
ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));

	it.buffer_size = buffer_size;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	/* -ENOATTR from the inline pass is not fatal; try shared xattrs */
	if (ret < 0 && ret != -ENOATTR)
	return shared_listxattr(&it);
667 #ifdef CONFIG_EROFS_FS_POSIX_ACL
668 struct posix_acl *erofs_get_acl(struct inode *inode, int type)
670 struct posix_acl *acl;
675 case ACL_TYPE_ACCESS:
676 prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
678 case ACL_TYPE_DEFAULT:
679 prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
682 return ERR_PTR(-EINVAL);
685 rc = erofs_getxattr(inode, prefix, "", NULL, 0);
687 value = kmalloc(rc, GFP_KERNEL);
689 return ERR_PTR(-ENOMEM);
690 rc = erofs_getxattr(inode, prefix, "", value, rc);
698 acl = posix_acl_from_xattr(&init_user_ns, value, rc);