/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>

/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

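/*
 * A usage sketch (hypothetical values): both table sizes can be set on
 * the kernel command line, e.g.
 *
 *	mhash_entries=8192 mphash_entries=4096
 *
 * simple_strtoul() above accepts decimal, octal and hex (0x...) forms.
 */
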
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
out_free_id:
#endif
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (sb_rdonly(mnt->mnt_sb))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

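/*
 * A minimal sketch (hypothetical caller): the answer is only valid at
 * the instant of the call; to keep a mount writable across an operation
 * use mnt_want_write()/mnt_drop_write() instead:
 *
 *	if (__mnt_is_readonly(path->mnt))
 *		return -EROFS;
 */
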
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, __mnt_drop_write() must be
 * called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

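/*
 * A minimal usage sketch (hypothetical caller). Every successful
 * mnt_want_write() must be paired with mnt_drop_write() once the write
 * is done:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... perform the write to path->dentry ...
 *	mnt_drop_write(path->mnt);
 */
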
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

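/*
 * A minimal sketch (hypothetical ioctl handler): the *_file variants let
 * a file already opened for write (FMODE_WRITER) take the cheaper
 * mnt_clone_write() path:
 *
 *	err = mnt_want_write_file(file);
 *	if (err)
 *		return err;
 *	... modify the inode backing 'file' ...
 *	mnt_drop_write_file(file);
 */
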
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

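/*
 * A minimal sketch (hypothetical caller): because lookup_mnt() returns
 * only the first mount stacked on a mountpoint, walking to the top of a
 * stack of mounts means repeating the lookup until it returns NULL:
 *
 *	struct vfsmount *child;
 *
 *	while ((child = lookup_mnt(&path))) {
 *		path_put(&path);
 *		path.mnt = child;
 *		path.dentry = dget(child->mnt_root);
 *	}
 */
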
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mountpoint.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dentry;
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);

	/*
	 * Safely avoid even the suggestion this code might sleep or
	 * lock the mount hash by taking advantage of the knowledge that
	 * mnt_change_mountpoint will not release the final reference
	 * to a mountpoint.
	 *
	 * During mounting, the mount passed in as the parent mount will
	 * continue to use the old mountpoint and during unmounting, the
	 * old mountpoint will continue to exist until namespace_unlock,
	 * which happens well after mnt_change_mountpoint.
	 */
	spin_lock(&old_mountpoint->d_lock);
	old_mountpoint->d_lockref.count--;
	spin_unlock(&old_mountpoint->d_lock);

	mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

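/*
 * A minimal sketch (hypothetical caller): mntget()/mntput() follow the
 * usual get/put discipline and both tolerate a NULL argument:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *
 *	... use m, possibly after dropping all locks ...
 *	mntput(m);
 */
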
/**
 * path_is_mountpoint() - Check if path is a mount in the current
 *                        namespace.
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!sb_rdonly(sb))
			retval = do_remount_sb(sb, SB_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static inline bool may_mandlock(void)
{
#ifndef	CONFIG_MANDATORY_FILE_LOCKING
	pr_warn("VFS: \"mand\" mount option not supported");
	return false;
#endif
	return capable(CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

int ksys_umount(char __user *name, int flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif

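/*
 * A userspace-side sketch (hypothetical path) of the flags accepted
 * above; MNT_DETACH requests a lazy unmount:
 *
 *	#include <sys/mount.h>
 *
 *	if (umount2("/mnt/data", MNT_DETACH) < 0)
 *		perror("umount2");
 */
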
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}

/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new will
 * not be attached anywhere in the namespace and will be private (i.e. changes
 * to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);

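/*
 * A minimal usage sketch (hypothetical caller, e.g. a stacking
 * filesystem that wants a detached copy of a lower mount):
 *
 *	struct vfsmount *lower = clone_private_mount(&lower_path);
 *
 *	if (IS_ERR(lower))
 *		return PTR_ERR(lower);
 *	... use lower as a private, unattached mount ...
 *	mntput(lower);
 */
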
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in case of insufficient memory.
 */

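/*
 * A shell-level sketch of the bind-mount table above (hypothetical
 * paths): with a shared source and a non-shared destination, the clone
 * becomes a peer of the source (the "shared (+)" case):
 *
 *	mount --make-shared /A
 *	mount --bind /A /B
 *
 * Mounts made under /A afterwards propagate to /B, and vice versa.
 */
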
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!parent_path) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}

static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	inode_lock(dentry->d_inode);
	if (unlikely(cant_mount(dentry))) {
		inode_unlock(dentry->d_inode);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	inode_unlock(path->dentry->d_inode);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}

static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}

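/*
 * A userspace-side sketch (hypothetical path) of what reaches
 * do_change_type(): the equivalent of "mount --make-rshared /mnt" is
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
 */
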
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}

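/*
 * A userspace-side sketch (hypothetical paths) of what reaches
 * do_loopback(): a recursive bind mount is
 *
 *	#include <sys/mount.h>
 *
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL);
 */
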
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int ms_flags, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (ms_flags & MS_BIND)
		err = change_mount_flags(path->mnt, ms_flags);
	else if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, sb_flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expire
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

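/*
 * A userspace-side sketch (hypothetical paths) of what reaches
 * do_move_mount(): atomically relocating an attached mount is
 *
 *	#include <sys/mount.h>
 *
 *	mount("/mnt/old", "/mnt/new", NULL, MS_MOVE, NULL);
 */
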
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}

static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags);

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	mnt = vfs_kern_mount(type, sb_flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	if (mount_too_revealing(mnt, &mnt_flags)) {
		mntput(mnt);
		return -EPERM;
	}

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);

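/*
 * A minimal sketch (hypothetical filesystem): an automounting fs keeps
 * its own expiry list and ages it from periodic work, e.g.
 *
 *	static LIST_HEAD(my_expiry_list);
 *
 *	... after creating an automounted submount:
 *	mnt_set_expiry(mnt, &my_expiry_list);
 *
 *	... from a delayed work item, typically once a minute:
 *	mark_mounts_for_expiry(&my_expiry_list);
 */
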
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

2609 * process a list of expirable mountpoints with the intent of discarding any
2610 * submounts of a specific parent mountpoint
2612 * mount_lock must be held for write
2614 static void shrink_submounts(struct mount *mnt)
2616 LIST_HEAD(graveyard);
2619 /* extract submounts of 'mountpoint' from the expiration list */
2620 while (select_submounts(mnt, &graveyard)) {
2621 while (!list_empty(&graveyard)) {
2622 m = list_first_entry(&graveyard, struct mount,
2624 touch_mnt_namespace(m->mnt_ns);
2625 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2631 * Some copy_from_user() implementations do not return the exact number of
2632 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
2633 * Note that this function differs from copy_from_user() in that it will oops
2634 * on bad values of `to', rather than returning a short copy.
2636 static long exact_copy_from_user(void *to, const void __user * from,
2640 const char __user *f = from;
2643 if (!access_ok(VERIFY_READ, from, n))
2646 current->kernel_uaccess_faults_ok++;
2648 if (__get_user(c, f)) {
2656 current->kernel_uaccess_faults_ok--;
2660 void *copy_mount_options(const void __user * data)
2669 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
2671 return ERR_PTR(-ENOMEM);
2673 /* We only care that *some* data at the address the user
2674 * gave us is valid. Just in case, we'll zero
2675 * the remainder of the page.
2677 /* copy_from_user cannot cross TASK_SIZE ! */
2678 size = TASK_SIZE - (unsigned long)data;
2679 if (size > PAGE_SIZE)
2680 size = PAGE_SIZE;
2682 i = size - exact_copy_from_user(copy, data, size);
2685 return ERR_PTR(-EFAULT);
2688 memset(copy + i, 0, PAGE_SIZE - i);
2692 char *copy_mount_string(const void __user *data)
2694 return data ? strndup_user(data, PAGE_SIZE) : NULL;
2698 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2699 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2701 * data is a (void *) that can point to any structure up to
2702 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2703 * information (or be NULL).
2705 * Pre-0.97 versions of mount() didn't have a flags word.
2706 * When the flags word was introduced its top half was required
2707 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2708 * Therefore, if this magic number is present, it carries no information
2709 * and must be discarded.
2711 long do_mount(const char *dev_name, const char __user *dir_name,
2712 const char *type_page, unsigned long flags, void *data_page)
2715 unsigned int mnt_flags = 0, sb_flags;
2719 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2720 flags &= ~MS_MGC_MSK;
2722 /* Basic sanity checks */
2724 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2726 if (flags & MS_NOUSER)
2729 /* ... and get the mountpoint */
2730 retval = user_path(dir_name, &path);
2734 retval = security_sb_mount(dev_name, &path,
2735 type_page, flags, data_page);
2736 if (!retval && !may_mount())
2738 if (!retval && (flags & SB_MANDLOCK) && !may_mandlock())
2743 /* Default to relatime unless overridden */
2744 if (!(flags & MS_NOATIME))
2745 mnt_flags |= MNT_RELATIME;
2747 /* Separate the per-mountpoint flags */
2748 if (flags & MS_NOSUID)
2749 mnt_flags |= MNT_NOSUID;
2750 if (flags & MS_NODEV)
2751 mnt_flags |= MNT_NODEV;
2752 if (flags & MS_NOEXEC)
2753 mnt_flags |= MNT_NOEXEC;
2754 if (flags & MS_NOATIME)
2755 mnt_flags |= MNT_NOATIME;
2756 if (flags & MS_NODIRATIME)
2757 mnt_flags |= MNT_NODIRATIME;
2758 if (flags & MS_STRICTATIME)
2759 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2760 if (flags & MS_RDONLY)
2761 mnt_flags |= MNT_READONLY;
2763 /* The default atime for remount is preservation */
2764 if ((flags & MS_REMOUNT) &&
2765 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
2766 MS_STRICTATIME)) == 0)) {
2767 mnt_flags &= ~MNT_ATIME_MASK;
2768 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
2771 sb_flags = flags & (SB_RDONLY |
2780 if (flags & MS_REMOUNT)
2781 retval = do_remount(&path, flags, sb_flags, mnt_flags,
2783 else if (flags & MS_BIND)
2784 retval = do_loopback(&path, dev_name, flags & MS_REC);
2785 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2786 retval = do_change_type(&path, flags);
2787 else if (flags & MS_MOVE)
2788 retval = do_move_mount(&path, dev_name);
2790 retval = do_new_mount(&path, type_page, sb_flags, mnt_flags,
2791 dev_name, data_page);
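/*
 * Userspace view (not kernel code) of the dispatch above; a sketch of
 * which mount(2) invocations reach which helper:
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL)            -> do_loopback()
 *	mount(NULL, "/dst", NULL, MS_REMOUNT|MS_RDONLY, NULL) -> do_remount()
 *	mount(NULL, "/dst", NULL, MS_SHARED, NULL)            -> do_change_type()
 *	mount("/dst", "/elsewhere", NULL, MS_MOVE, NULL)      -> do_move_mount()
 *	mount("/dev/sda1", "/dst", "ext4", 0, "")             -> do_new_mount()
 */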
2797 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
2799 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
2802 static void dec_mnt_namespaces(struct ucounts *ucounts)
2804 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
2807 static void free_mnt_ns(struct mnt_namespace *ns)
2809 ns_free_inum(&ns->ns);
2810 dec_mnt_namespaces(ns->ucounts);
2811 put_user_ns(ns->user_ns);
2816 * Assign a sequence number so we can detect when we attempt to bind
2817 * mount a reference to an older mount namespace into the current
2818 * mount namespace, preventing reference counting loops. A 64bit
2819 * number incrementing even at 10GHz would take about 58 years to wrap;
2820 * real creation rates are far slower, so we can ignore the possibility.
2822 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2824 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2826 struct mnt_namespace *new_ns;
2827 struct ucounts *ucounts;
2830 ucounts = inc_mnt_namespaces(user_ns);
2831 if (!ucounts)
2832 return ERR_PTR(-ENOSPC);
2834 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2836 dec_mnt_namespaces(ucounts);
2837 return ERR_PTR(-ENOMEM);
2839 ret = ns_alloc_inum(&new_ns->ns);
2842 dec_mnt_namespaces(ucounts);
2843 return ERR_PTR(ret);
2845 new_ns->ns.ops = &mntns_operations;
2846 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2847 atomic_set(&new_ns->count, 1);
2848 new_ns->root = NULL;
2849 INIT_LIST_HEAD(&new_ns->list);
2850 init_waitqueue_head(&new_ns->poll);
2852 new_ns->user_ns = get_user_ns(user_ns);
2853 new_ns->ucounts = ucounts;
2855 new_ns->pending_mounts = 0;
2860 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2861 struct user_namespace *user_ns, struct fs_struct *new_fs)
2863 struct mnt_namespace *new_ns;
2864 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2865 struct mount *p, *q;
2872 if (likely(!(flags & CLONE_NEWNS))) {
2879 new_ns = alloc_mnt_ns(user_ns);
2884 /* First pass: copy the tree topology */
2885 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2886 if (user_ns != ns->user_ns)
2887 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2888 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2891 free_mnt_ns(new_ns);
2892 return ERR_CAST(new);
2895 list_add_tail(&new_ns->list, &new->mnt_list);
2898 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2899 * as belonging to new namespace. We have already acquired a private
2900 * fs_struct, so tsk->fs->lock is not needed.
2908 if (&p->mnt == new_fs->root.mnt) {
2909 new_fs->root.mnt = mntget(&q->mnt);
2912 if (&p->mnt == new_fs->pwd.mnt) {
2913 new_fs->pwd.mnt = mntget(&q->mnt);
2917 p = next_mnt(p, old);
2918 q = next_mnt(q, new);
2921 while (p->mnt.mnt_root != q->mnt.mnt_root)
2922 p = next_mnt(p, old);
2935 * create_mnt_ns - creates a private namespace and adds a root filesystem
2936 * @mnt: pointer to the new root filesystem mountpoint
2938 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2940 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2941 if (!IS_ERR(new_ns)) {
2942 struct mount *mnt = real_mount(m);
2943 mnt->mnt_ns = new_ns;
2946 list_add(&mnt->mnt_list, &new_ns->list);
2953 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2955 struct mnt_namespace *ns;
2956 struct super_block *s;
2960 ns = create_mnt_ns(mnt);
2962 return ERR_CAST(ns);
2964 err = vfs_path_lookup(mnt->mnt_root, mnt,
2965 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2970 return ERR_PTR(err);
2972 /* trade a vfsmount reference for active sb one */
2973 s = path.mnt->mnt_sb;
2974 atomic_inc(&s->s_active);
2976 /* lock the sucker */
2977 down_write(&s->s_umount);
2978 /* ... and return the root of (sub)tree on it */
2981 EXPORT_SYMBOL(mount_subtree);
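/*
 * Example (not part of this file): mount_subtree() lets a filesystem
 * mount an internal root and return the dentry of a path inside it, the
 * way NFS handles exports below the server root.  Hypothetical names;
 * note that mount_subtree() consumes the vfsmount reference on success
 * and failure alike.
 */
static struct dentry *example_mount_pathname(struct file_system_type *fs_type,
					     int flags, void *data,
					     const char *subpath)
{
	struct vfsmount *root_mnt;

	root_mnt = vfs_kern_mount(fs_type, flags, "example", data);
	if (IS_ERR(root_mnt))
		return ERR_CAST(root_mnt);
	return mount_subtree(root_mnt, subpath);
}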
2983 int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
2984 unsigned long flags, void __user *data)
2991 kernel_type = copy_mount_string(type);
2992 ret = PTR_ERR(kernel_type);
2993 if (IS_ERR(kernel_type))
2996 kernel_dev = copy_mount_string(dev_name);
2997 ret = PTR_ERR(kernel_dev);
2998 if (IS_ERR(kernel_dev))
3001 options = copy_mount_options(data);
3002 ret = PTR_ERR(options);
3003 if (IS_ERR(options))
3006 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3017 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3018 char __user *, type, unsigned long, flags, void __user *, data)
3020 return ksys_mount(dev_name, dir_name, type, flags, data);
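/*
 * Userspace view (not kernel code): a minimal caller of the syscall
 * defined above.  Sketch only; the paths and options are made up.
 */
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	/* source, target, fstype, flags, fs-specific data */
	if (mount("tmpfs", "/mnt", "tmpfs", MS_NOSUID | MS_NODEV,
		  "size=16m") == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}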
3024 * Return true if path is reachable from root
3026 * namespace_sem or mount_lock is held
3028 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3029 const struct path *root)
3031 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3032 dentry = mnt->mnt_mountpoint;
3033 mnt = mnt->mnt_parent;
3035 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3038 bool path_is_under(const struct path *path1, const struct path *path2)
3041 read_seqlock_excl(&mount_lock);
3042 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3043 read_sequnlock_excl(&mount_lock);
3046 EXPORT_SYMBOL(path_is_under);
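/*
 * Example (not part of this file): a typical use is refusing an
 * operation that would make a tree unreachable or cyclic, e.g.
 *
 *	if (path_is_under(&where, &what))
 *		return -ELOOP;
 *
 * (hypothetical variables); pivot_root() below performs the same kind
 * of checks directly via is_path_reachable().
 */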
3049 * pivot_root Semantics:
3050 * Moves the root file system of the current process to the directory put_old,
3051 * makes new_root as the new root file system of the current process, and sets
3052 * root/cwd of all processes which had them on the current root to new_root.
3055 * The new_root and put_old must be directories, and must not be on the
3056 * same file system as the current process root. The put_old must be
3057 * underneath new_root, i.e. adding a non-zero number of /.. to the string
3058 * pointed to by put_old must yield the same directory as new_root. No other
3059 * file system may be mounted on put_old. After all, new_root is a mountpoint.
3061 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
3062 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
3063 * in this situation.
3066 * - we don't move root/cwd if they are not at the root (reason: if something
3067 * cared enough to change them, it's probably wrong to force them elsewhere)
3068 * - it's okay to pick a root that isn't the root of a file system, e.g.
3069 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
3070 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
3073 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
3074 const char __user *, put_old)
3076 struct path new, old, parent_path, root_parent, root;
3077 struct mount *new_mnt, *root_mnt, *old_mnt;
3078 struct mountpoint *old_mp, *root_mp;
3084 error = user_path_dir(new_root, &new);
3088 error = user_path_dir(put_old, &old);
3092 error = security_sb_pivotroot(&old, &new);
3096 get_fs_root(current->fs, &root);
3097 old_mp = lock_mount(&old);
3098 error = PTR_ERR(old_mp);
3103 new_mnt = real_mount(new.mnt);
3104 root_mnt = real_mount(root.mnt);
3105 old_mnt = real_mount(old.mnt);
3106 if (IS_MNT_SHARED(old_mnt) ||
3107 IS_MNT_SHARED(new_mnt->mnt_parent) ||
3108 IS_MNT_SHARED(root_mnt->mnt_parent))
3110 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
3112 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
3115 if (d_unlinked(new.dentry))
3118 if (new_mnt == root_mnt || old_mnt == root_mnt)
3119 goto out4; /* loop, on the same file system */
3121 if (root.mnt->mnt_root != root.dentry)
3122 goto out4; /* not a mountpoint */
3123 if (!mnt_has_parent(root_mnt))
3124 goto out4; /* not attached */
3125 root_mp = root_mnt->mnt_mp;
3126 if (new.mnt->mnt_root != new.dentry)
3127 goto out4; /* not a mountpoint */
3128 if (!mnt_has_parent(new_mnt))
3129 goto out4; /* not attached */
3130 /* make sure we can reach put_old from new_root */
3131 if (!is_path_reachable(old_mnt, old.dentry, &new))
3133 /* make certain new is below the root */
3134 if (!is_path_reachable(new_mnt, new.dentry, &root))
3136 root_mp->m_count++; /* pin it so it won't go away */
3138 detach_mnt(new_mnt, &parent_path);
3139 detach_mnt(root_mnt, &root_parent);
3140 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
3141 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
3142 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
3144 /* mount old root on put_old */
3145 attach_mnt(root_mnt, old_mnt, old_mp);
3146 /* mount new_root on / */
3147 attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
3148 touch_mnt_namespace(current->nsproxy->mnt_ns);
3149 /* A moved mount should not expire automatically */
3150 list_del_init(&new_mnt->mnt_expire);
3151 put_mountpoint(root_mp);
3152 unlock_mount_hash();
3153 chroot_fs_refs(&root, &new);
3156 unlock_mount(old_mp);
3158 path_put(&root_parent);
3159 path_put(&parent_path);
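/*
 * Userspace view (not kernel code): the canonical pivot_root sequence
 * implied by the rules above.  glibc has no wrapper, hence syscall(2);
 * "/newroot" must already be a mount point (e.g. via a bind mount) and
 * contain an "oldroot" directory.  Sketch only, error handling trimmed.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	if (chdir("/newroot") == -1 ||
	    syscall(SYS_pivot_root, ".", "oldroot") == -1 ||
	    chroot(".") == -1 || chdir("/") == -1) {
		perror("pivot_root sequence");
		return 1;
	}
	return 0;
}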
3171 static void __init init_mount_tree(void)
3173 struct vfsmount *mnt;
3174 struct mnt_namespace *ns;
3176 struct file_system_type *type;
3178 type = get_fs_type("rootfs");
3180 panic("Can't find rootfs type");
3181 mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
3182 put_filesystem(type);
3184 panic("Can't create rootfs");
3186 ns = create_mnt_ns(mnt);
3188 panic("Can't allocate initial namespace");
3190 init_task.nsproxy->mnt_ns = ns;
3194 root.dentry = mnt->mnt_root;
3195 mnt->mnt_flags |= MNT_LOCKED;
3197 set_fs_pwd(current->fs, &root);
3198 set_fs_root(current->fs, &root);
3201 void __init mnt_init(void)
3205 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
3206 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3208 mount_hashtable = alloc_large_system_hash("Mount-cache",
3209 sizeof(struct hlist_head),
3212 &m_hash_shift, &m_hash_mask, 0, 0);
3213 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
3214 sizeof(struct hlist_head),
3217 &mp_hash_shift, &mp_hash_mask, 0, 0);
3219 if (!mount_hashtable || !mountpoint_hashtable)
3220 panic("Failed to allocate mount hash table\n");
3226 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
3227 __func__, err);
3228 fs_kobj = kobject_create_and_add("fs", NULL);
3230 printk(KERN_WARNING "%s: kobj create error\n", __func__);
3235 void put_mnt_ns(struct mnt_namespace *ns)
3237 if (!atomic_dec_and_test(&ns->count))
3239 drop_collected_mounts(&ns->root->mnt);
3243 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
3245 struct vfsmount *mnt;
3246 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, data);
3249 * it is a longterm mount, don't release mnt until
3250 * we unmount it, just before the filesystem is unregistered
3252 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
3256 EXPORT_SYMBOL_GPL(kern_mount_data);
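/*
 * Example (not part of this file): a typical internal-filesystem user
 * pins one long-term mount for kernel-internal use right after
 * registration.  "example_fs_type" and "examplefs" are hypothetical.
 */
static struct file_system_type example_fs_type = {
	.name = "examplefs",
	/* .mount / .kill_sb would be filled in by a real filesystem */
};
static struct vfsmount *example_mnt __read_mostly;

static int __init example_fs_init(void)
{
	int err = register_filesystem(&example_fs_type);

	if (err)
		return err;
	example_mnt = kern_mount_data(&example_fs_type, NULL);
	if (IS_ERR(example_mnt)) {
		unregister_filesystem(&example_fs_type);
		return PTR_ERR(example_mnt);
	}
	return 0;
}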
3258 void kern_unmount(struct vfsmount *mnt)
3260 /* release long term mount so mount point can be released */
3261 if (!IS_ERR_OR_NULL(mnt)) {
3262 real_mount(mnt)->mnt_ns = NULL;
3263 synchronize_rcu(); /* yecchhh... */
3267 EXPORT_SYMBOL(kern_unmount);
3269 bool our_mnt(struct vfsmount *mnt)
3271 return check_mnt(real_mount(mnt));
3274 bool current_chrooted(void)
3276 /* Does the current process have a non-standard root */
3277 struct path ns_root;
3278 struct path fs_root;
3281 /* Find the namespace root */
3282 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
3283 ns_root.dentry = ns_root.mnt->mnt_root;
3285 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
3288 get_fs_root(current->fs, &fs_root);
3290 chrooted = !path_equal(&fs_root, &ns_root);
3298 static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
3301 int new_flags = *new_mnt_flags;
3303 bool visible = false;
3305 down_read(&namespace_sem);
3306 list_for_each_entry(mnt, &ns->list, mnt_list) {
3307 struct mount *child;
3310 if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type)
3313 /* This mount is not fully visible if its root directory
3314 * is not the root directory of the filesystem.
3316 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
3319 /* A local view of the mount flags */
3320 mnt_flags = mnt->mnt.mnt_flags;
3322 /* Don't miss readonly hidden in the superblock flags */
3323 if (sb_rdonly(mnt->mnt.mnt_sb))
3324 mnt_flags |= MNT_LOCK_READONLY;
3326 /* Verify the mount flags are equal to or more permissive
3327 * than the proposed new mount.
3329 if ((mnt_flags & MNT_LOCK_READONLY) &&
3330 !(new_flags & MNT_READONLY))
3332 if ((mnt_flags & MNT_LOCK_ATIME) &&
3333 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
3336 /* This mount is not fully visible if there are any
3337 * locked child mounts that cover anything except for
3338 * empty directories.
3340 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3341 struct inode *inode = child->mnt_mountpoint->d_inode;
3342 /* Only worry about locked mounts */
3343 if (!(child->mnt.mnt_flags & MNT_LOCKED))
3345 /* Is the directory permanently empty? */
3346 if (!is_empty_dir_inode(inode))
3349 /* Preserve the locked attributes */
3350 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
3357 up_read(&namespace_sem);
3361 static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
3363 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
3364 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
3365 unsigned long s_iflags;
3367 if (ns->user_ns == &init_user_ns)
3370 /* Can this filesystem be too revealing? */
3371 s_iflags = mnt->mnt_sb->s_iflags;
3372 if (!(s_iflags & SB_I_USERNS_VISIBLE))
3375 if ((s_iflags & required_iflags) != required_iflags) {
3376 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
3381 return !mnt_already_visible(ns, mnt, new_mnt_flags);
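/*
 * Userspace view (not kernel code): the check above is why an
 * unprivileged user namespace cannot mount a fresh proc while the
 * original /proc is partly covered by locked mounts.  Sketch only.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1)
		perror("unshare");
	/* fails with EPERM if the existing proc is not fully visible */
	if (mount("proc", "/proc", "proc", 0, NULL) == -1)
		perror("mount proc");
	return 0;
}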
3384 bool mnt_may_suid(struct vfsmount *mnt)
3387 * Foreign mounts (accessed via fchdir or through /proc
3388 * symlinks) are always treated as if they are nosuid. This
3389 * prevents namespaces from trusting potentially unsafe
3390 * suid/sgid bits, file caps, or security labels that originate
3391 * in other namespaces.
3393 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
3394 current_in_userns(mnt->mnt_sb->s_user_ns);
3397 static struct ns_common *mntns_get(struct task_struct *task)
3399 struct ns_common *ns = NULL;
3400 struct nsproxy *nsproxy;
3403 nsproxy = task->nsproxy;
3405 ns = &nsproxy->mnt_ns->ns;
3406 get_mnt_ns(to_mnt_ns(ns));
3413 static void mntns_put(struct ns_common *ns)
3415 put_mnt_ns(to_mnt_ns(ns));
3418 static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
3420 struct fs_struct *fs = current->fs;
3421 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
3425 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
3426 !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
3427 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
3434 old_mnt_ns = nsproxy->mnt_ns;
3435 nsproxy->mnt_ns = mnt_ns;
3438 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
3439 "/", LOOKUP_DOWN, &root);
3441 /* revert to old namespace */
3442 nsproxy->mnt_ns = old_mnt_ns;
3447 put_mnt_ns(old_mnt_ns);
3449 /* Update the pwd and root */
3450 set_fs_pwd(fs, &root);
3451 set_fs_root(fs, &root);
3457 static struct user_namespace *mntns_owner(struct ns_common *ns)
3459 return to_mnt_ns(ns)->user_ns;
3462 const struct proc_ns_operations mntns_operations = {
3464 .type = CLONE_NEWNS,
3467 .install = mntns_install,
3468 .owner = mntns_owner,