/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/syscalls.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/acct.h> /* acct_auto_close_mnt */
#include <linux/ramfs.h> /* init_rootfs */
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/idr.h>		/* ida_* mount ID allocation */
#include <linux/export.h>	/* EXPORT_SYMBOL_GPL */
#include "pnode.h"
#include "internal.h"
#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;
static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
DEFINE_BRLOCK(vfsmount_lock);
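/*
 * Illustrative sketch, not part of the original file: readers (hash
 * lookups, walking back up the tree) take the brlock for read, which
 * is cheap and per-cpu; anything that changes the hash or the mount
 * tree takes it for write.  This assumes the 3.x brlock API, where
 * the macros take the lock name rather than a pointer.
 */
static inline void example_lock_discipline(void)
{
	br_read_lock(vfsmount_lock);	/* e.g. mountpoint lookup */
	br_read_unlock(vfsmount_lock);

	br_write_lock(vfsmount_lock);	/* e.g. attaching a mount */
	br_write_unlock(vfsmount_lock);
}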
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
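/*
 * Illustrative sketch, not part of the original file: hash() selects
 * the chain in mount_hashtable for a (parent mount, mountpoint dentry)
 * pair; a lookup such as __lookup_mnt(), which appears later in the
 * real file, walks that chain comparing both keys.
 */
static inline struct list_head *example_hash_chain(struct vfsmount *mnt,
						   struct dentry *dentry)
{
	return mount_hashtable + hash(mnt, dentry);
}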
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;
	res = ida_get_new_above(&mnt_group_ida, mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;
	return res;
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}
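/*
 * Illustrative sketch, not part of this excerpt: in the 3.x tree,
 * mntget() bumps the reference through this helper (via real_mount()),
 * so the SMP per-cpu counter and the UP plain counter stay behind one
 * interface.
 */
static inline void example_mnt_get(struct mount *mnt)
{
	mnt_add_count(mnt, 1);	/* what mntget() does for a held vfsmount */
}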
/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

out_free_devname:
	kfree(mnt->mnt_devname);
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
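/*
 * Illustrative sketch, not part of the original file: callers in the
 * style of vfs_kern_mount() or clone_mnt() obtain their struct mount
 * from alloc_vfsmnt() and then fill in the superblock-related fields;
 * error handling is elided and the field names follow the 3.x
 * struct mount layout.
 */
static inline struct mount *example_new_mount(const char *name)
{
	struct mount *mnt = alloc_vfsmnt(name);
	if (!mnt)
		return NULL;
	/* a real caller now sets mnt->mnt.mnt_sb, mnt->mnt.mnt_root, ... */
	return mnt;
}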