// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/xattr.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
13 #include <linux/security.h>
17 struct super_block *sb;
25 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
27 /* the only user of kunmap() is 'init_inode_xattrs' */
28 if (unlikely(!atomic))
31 kunmap_atomic(it->kaddr);
33 unlock_page(it->page);
37 static inline void xattr_iter_end_final(struct xattr_iter *it)
42 xattr_iter_end(it, true);
45 static int init_inode_xattrs(struct inode *inode)
47 struct erofs_vnode *const vi = EROFS_V(inode);
50 struct erofs_xattr_ibody_header *ih;
51 struct super_block *sb;
52 struct erofs_sb_info *sbi;
56 /* the most case is that xattrs of this inode are initialized. */
57 if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
60 if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
63 /* someone has initialized xattrs for us? */
64 if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
68 * bypass all xattr operations if ->xattr_isize is not greater than
69 * sizeof(struct erofs_xattr_ibody_header), in detail:
70 * 1) it is not enough to contain erofs_xattr_ibody_header then
71 * ->xattr_isize should be 0 (it means no xattr);
72 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
73 * undefined right now (maybe use later with some new sb feature).
75 if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
76 errln("xattr_isize %d of nid %llu is not supported yet",
77 vi->xattr_isize, vi->nid);
80 } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
81 if (unlikely(vi->xattr_isize)) {
84 goto out_unlock; /* xattr ondisk layout error */
92 it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
93 it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
95 it.page = erofs_get_inline_page(inode, it.blkaddr);
96 if (IS_ERR(it.page)) {
97 ret = PTR_ERR(it.page);
101 /* read in shared xattr array (non-atomic, see kmalloc below) */
102 it.kaddr = kmap(it.page);
105 ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
107 vi->xattr_shared_count = ih->h_shared_count;
108 vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
109 sizeof(uint), GFP_KERNEL);
110 if (!vi->xattr_shared_xattrs) {
111 xattr_iter_end(&it, atomic_map);
116 /* let's skip ibody header */
117 it.ofs += sizeof(struct erofs_xattr_ibody_header);
119 for (i = 0; i < vi->xattr_shared_count; ++i) {
120 if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
121 /* cannot be unaligned */
122 BUG_ON(it.ofs != EROFS_BLKSIZ);
123 xattr_iter_end(&it, atomic_map);
125 it.page = erofs_get_meta_page(sb, ++it.blkaddr,
126 S_ISDIR(inode->i_mode));
127 if (IS_ERR(it.page)) {
128 kfree(vi->xattr_shared_xattrs);
129 vi->xattr_shared_xattrs = NULL;
130 ret = PTR_ERR(it.page);
134 it.kaddr = kmap_atomic(it.page);
138 vi->xattr_shared_xattrs[i] =
139 le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
140 it.ofs += sizeof(__le32);
142 xattr_iter_end(&it, atomic_map);
144 set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
147 clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
/*
 * the general idea for these return values is
 * if	0 is returned, go on processing the current xattr;
 *	1 (> 0) is returned, skip this round to process the next xattr;
 *    -err (< 0) is returned, an error (maybe ENOXATTR) occurred
 *	and need to be handled
 */
struct xattr_iter_handlers {
	/* match/collect one entry header; see return convention above */
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	/* consume a slice of the xattr name */
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	/* prepare (or skip) the value buffer; may be NULL */
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	/* consume a slice of the xattr value */
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};
167 static inline int xattr_iter_fixup(struct xattr_iter *it)
169 if (it->ofs < EROFS_BLKSIZ)
172 xattr_iter_end(it, true);
174 it->blkaddr += erofs_blknr(it->ofs);
176 it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
177 if (IS_ERR(it->page)) {
178 int err = PTR_ERR(it->page);
184 it->kaddr = kmap_atomic(it->page);
185 it->ofs = erofs_blkoff(it->ofs);
189 static int inline_xattr_iter_begin(struct xattr_iter *it,
192 struct erofs_vnode *const vi = EROFS_V(inode);
193 struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
194 unsigned int xattr_header_sz, inline_xattr_ofs;
196 xattr_header_sz = inlinexattr_header_size(inode);
197 if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
198 BUG_ON(xattr_header_sz > vi->xattr_isize);
202 inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
204 it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
205 it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
207 it->page = erofs_get_inline_page(inode, it->blkaddr);
208 if (IS_ERR(it->page))
209 return PTR_ERR(it->page);
211 it->kaddr = kmap_atomic(it->page);
212 return vi->xattr_isize - xattr_header_sz;
216 * Regardless of success or failure, `xattr_foreach' will end up with
217 * `ofs' pointing to the next xattr item rather than an arbitrary position.
219 static int xattr_foreach(struct xattr_iter *it,
220 const struct xattr_iter_handlers *op,
221 unsigned int *tlimit)
223 struct erofs_xattr_entry entry;
224 unsigned int value_sz, processed, slice;
227 /* 0. fixup blkaddr, ofs, ipage */
228 err = xattr_iter_fixup(it);
233 * 1. read xattr entry to the memory,
234 * since we do EROFS_XATTR_ALIGN
235 * therefore entry should be in the page
237 entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
239 unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
241 BUG_ON(*tlimit < entry_sz);
245 it->ofs += sizeof(struct erofs_xattr_entry);
246 value_sz = le16_to_cpu(entry.e_value_size);
249 err = op->entry(it, &entry);
251 it->ofs += entry.e_name_len + value_sz;
255 /* 2. handle xattr name (ofs will finally be at the end of name) */
258 while (processed < entry.e_name_len) {
259 if (it->ofs >= EROFS_BLKSIZ) {
260 BUG_ON(it->ofs > EROFS_BLKSIZ);
262 err = xattr_iter_fixup(it);
268 slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
269 entry.e_name_len - processed);
272 err = op->name(it, processed, it->kaddr + it->ofs, slice);
274 it->ofs += entry.e_name_len - processed + value_sz;
282 /* 3. handle xattr value */
285 if (op->alloc_buffer) {
286 err = op->alloc_buffer(it, value_sz);
293 while (processed < value_sz) {
294 if (it->ofs >= EROFS_BLKSIZ) {
295 BUG_ON(it->ofs > EROFS_BLKSIZ);
297 err = xattr_iter_fixup(it);
303 slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
304 value_sz - processed);
305 op->value(it, processed, it->kaddr + it->ofs, slice);
311 /* xattrs should be 4-byte aligned (on-disk constraint) */
312 it->ofs = EROFS_XATTR_ALIGN(it->ofs);
313 return err < 0 ? err : 0;
316 struct getxattr_iter {
317 struct xattr_iter it;
320 int buffer_size, index;
324 static int xattr_entrymatch(struct xattr_iter *_it,
325 struct erofs_xattr_entry *entry)
327 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
329 return (it->index != entry->e_name_index ||
330 it->name.len != entry->e_name_len) ? -ENOATTR : 0;
333 static int xattr_namematch(struct xattr_iter *_it,
334 unsigned int processed, char *buf, unsigned int len)
336 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
338 return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
341 static int xattr_checkbuffer(struct xattr_iter *_it,
342 unsigned int value_sz)
344 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
345 int err = it->buffer_size < value_sz ? -ERANGE : 0;
347 it->buffer_size = value_sz;
348 return !it->buffer ? 1 : err;
351 static void xattr_copyvalue(struct xattr_iter *_it,
352 unsigned int processed,
353 char *buf, unsigned int len)
355 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
357 memcpy(it->buffer + processed, buf, len);
360 static const struct xattr_iter_handlers find_xattr_handlers = {
361 .entry = xattr_entrymatch,
362 .name = xattr_namematch,
363 .alloc_buffer = xattr_checkbuffer,
364 .value = xattr_copyvalue
367 static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
370 unsigned int remaining;
372 ret = inline_xattr_iter_begin(&it->it, inode);
378 ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
382 xattr_iter_end_final(&it->it);
384 return ret ? ret : it->buffer_size;
387 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
389 struct erofs_vnode *const vi = EROFS_V(inode);
390 struct super_block *const sb = inode->i_sb;
391 struct erofs_sb_info *const sbi = EROFS_SB(sb);
395 for (i = 0; i < vi->xattr_shared_count; ++i) {
396 erofs_blk_t blkaddr =
397 xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
399 it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
401 if (!i || blkaddr != it->it.blkaddr) {
403 xattr_iter_end(&it->it, true);
405 it->it.page = erofs_get_meta_page(sb, blkaddr, false);
406 if (IS_ERR(it->it.page))
407 return PTR_ERR(it->it.page);
409 it->it.kaddr = kmap_atomic(it->it.page);
410 it->it.blkaddr = blkaddr;
413 ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
417 if (vi->xattr_shared_count)
418 xattr_iter_end_final(&it->it);
420 return ret ? ret : it->buffer_size;
423 static bool erofs_xattr_user_list(struct dentry *dentry)
425 return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
428 static bool erofs_xattr_trusted_list(struct dentry *dentry)
430 return capable(CAP_SYS_ADMIN);
433 int erofs_getxattr(struct inode *inode, int index,
435 void *buffer, size_t buffer_size)
438 struct getxattr_iter it;
443 ret = init_inode_xattrs(inode);
449 it.name.len = strlen(name);
450 if (it.name.len > EROFS_NAME_LEN)
455 it.buffer_size = buffer_size;
457 it.it.sb = inode->i_sb;
458 ret = inline_getxattr(inode, &it);
460 ret = shared_getxattr(inode, &it);
464 static int erofs_xattr_generic_get(const struct xattr_handler *handler,
465 struct dentry *unused, struct inode *inode,
466 const char *name, void *buffer, size_t size)
468 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
470 switch (handler->flags) {
471 case EROFS_XATTR_INDEX_USER:
472 if (!test_opt(sbi, XATTR_USER))
475 case EROFS_XATTR_INDEX_TRUSTED:
476 if (!capable(CAP_SYS_ADMIN))
479 case EROFS_XATTR_INDEX_SECURITY:
485 return erofs_getxattr(inode, handler->flags, name, buffer, size);
488 const struct xattr_handler erofs_xattr_user_handler = {
489 .prefix = XATTR_USER_PREFIX,
490 .flags = EROFS_XATTR_INDEX_USER,
491 .list = erofs_xattr_user_list,
492 .get = erofs_xattr_generic_get,
495 const struct xattr_handler erofs_xattr_trusted_handler = {
496 .prefix = XATTR_TRUSTED_PREFIX,
497 .flags = EROFS_XATTR_INDEX_TRUSTED,
498 .list = erofs_xattr_trusted_list,
499 .get = erofs_xattr_generic_get,
#ifdef CONFIG_EROFS_FS_SECURITY
/* "security." namespace handler (no .list: always listed) */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif
510 const struct xattr_handler *erofs_xattr_handlers[] = {
511 &erofs_xattr_user_handler,
512 #ifdef CONFIG_EROFS_FS_POSIX_ACL
513 &posix_acl_access_xattr_handler,
514 &posix_acl_default_xattr_handler,
516 &erofs_xattr_trusted_handler,
517 #ifdef CONFIG_EROFS_FS_SECURITY
518 &erofs_xattr_security_handler,
523 struct listxattr_iter {
524 struct xattr_iter it;
526 struct dentry *dentry;
528 int buffer_size, buffer_ofs;
531 static int xattr_entrylist(struct xattr_iter *_it,
532 struct erofs_xattr_entry *entry)
534 struct listxattr_iter *it =
535 container_of(_it, struct listxattr_iter, it);
536 unsigned int prefix_len;
539 const struct xattr_handler *h =
540 erofs_xattr_handler(entry->e_name_index);
542 if (!h || (h->list && !h->list(it->dentry)))
545 prefix = xattr_prefix(h);
546 prefix_len = strlen(prefix);
549 it->buffer_ofs += prefix_len + entry->e_name_len + 1;
553 if (it->buffer_ofs + prefix_len
554 + entry->e_name_len + 1 > it->buffer_size)
557 memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
558 it->buffer_ofs += prefix_len;
562 static int xattr_namelist(struct xattr_iter *_it,
563 unsigned int processed, char *buf, unsigned int len)
565 struct listxattr_iter *it =
566 container_of(_it, struct listxattr_iter, it);
568 memcpy(it->buffer + it->buffer_ofs, buf, len);
569 it->buffer_ofs += len;
573 static int xattr_skipvalue(struct xattr_iter *_it,
574 unsigned int value_sz)
576 struct listxattr_iter *it =
577 container_of(_it, struct listxattr_iter, it);
579 it->buffer[it->buffer_ofs++] = '\0';
583 static const struct xattr_iter_handlers list_xattr_handlers = {
584 .entry = xattr_entrylist,
585 .name = xattr_namelist,
586 .alloc_buffer = xattr_skipvalue,
590 static int inline_listxattr(struct listxattr_iter *it)
593 unsigned int remaining;
595 ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
601 ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
605 xattr_iter_end_final(&it->it);
606 return ret ? ret : it->buffer_ofs;
609 static int shared_listxattr(struct listxattr_iter *it)
611 struct inode *const inode = d_inode(it->dentry);
612 struct erofs_vnode *const vi = EROFS_V(inode);
613 struct super_block *const sb = inode->i_sb;
614 struct erofs_sb_info *const sbi = EROFS_SB(sb);
618 for (i = 0; i < vi->xattr_shared_count; ++i) {
619 erofs_blk_t blkaddr =
620 xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
622 it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
623 if (!i || blkaddr != it->it.blkaddr) {
625 xattr_iter_end(&it->it, true);
627 it->it.page = erofs_get_meta_page(sb, blkaddr, false);
628 if (IS_ERR(it->it.page))
629 return PTR_ERR(it->it.page);
631 it->it.kaddr = kmap_atomic(it->it.page);
632 it->it.blkaddr = blkaddr;
635 ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
639 if (vi->xattr_shared_count)
640 xattr_iter_end_final(&it->it);
642 return ret ? ret : it->buffer_ofs;
645 ssize_t erofs_listxattr(struct dentry *dentry,
646 char *buffer, size_t buffer_size)
649 struct listxattr_iter it;
651 ret = init_inode_xattrs(d_inode(dentry));
657 it.buffer_size = buffer_size;
660 it.it.sb = dentry->d_sb;
662 ret = inline_listxattr(&it);
663 if (ret < 0 && ret != -ENOATTR)
665 return shared_listxattr(&it);
668 #ifdef CONFIG_EROFS_FS_POSIX_ACL
669 struct posix_acl *erofs_get_acl(struct inode *inode, int type)
671 struct posix_acl *acl;
676 case ACL_TYPE_ACCESS:
677 prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
679 case ACL_TYPE_DEFAULT:
680 prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
683 return ERR_PTR(-EINVAL);
686 rc = erofs_getxattr(inode, prefix, "", NULL, 0);
688 value = kmalloc(rc, GFP_KERNEL);
690 return ERR_PTR(-ENOMEM);
691 rc = erofs_getxattr(inode, prefix, "", value, rc);
699 acl = posix_acl_from_xattr(&init_user_ns, value, rc);