if (atomic_read(&mnt->mnt_count) == 2) {
/* delete from the namespace */
list_del_init(&mnt->mnt_list);
mnt->mnt_namespace = NULL;
spin_unlock(&vfsmount_lock);
} else {
/*
* Someone brought it back to life whilst we didn't have any
* locks held so return it to the expiration list
*/
list_add_tail(&mnt->mnt_expire, mounts);
spin_unlock(&vfsmount_lock);
}
}
/*
* process a list of expirable mountpoints with the intent of discarding any
* mountpoints that aren't in use and haven't been touched since last we came
* here
*/
void mark_mounts_for_expiry(struct list_head *mounts)
{
struct namespace *namespace;
struct vfsmount *mnt, *next;
LIST_HEAD(graveyard);
if (list_empty(mounts))
return;
spin_lock(&vfsmount_lock);
/* extract from the expiration list every vfsmount that matches the
* following criteria:
* - only referenced by its parent vfsmount
* - still marked for expiry (marked on the last call here; marks are
* cleared by mntput())
*/
list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
if (!xchg(&mnt->mnt_expiry_mark, 1) ||
atomic_read(&mnt->mnt_count) != 1)
continue;
mntget(mnt);
list_move(&mnt->mnt_expire, &graveyard);
}
/*
* go through the vfsmounts we've just consigned to the graveyard to
* - check that they're still dead
* - delete the vfsmount from the appropriate namespace under lock
* - dispose of the corpse
*/
while (!list_empty(&graveyard)) {
mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
list_del_init(&mnt->mnt_expire);
/* don't do anything if the namespace is dead - all the
* vfsmounts from it are going away anyway */
namespace = mnt->mnt_namespace;
if (!namespace || !namespace->root)
continue;
get_namespace(namespace);
		spin_unlock(&vfsmount_lock);
		expire_mount(mnt, mounts);
		mntput(mnt);
put_namespace(namespace);
spin_lock(&vfsmount_lock);
}
spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
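/*
 * Illustrative sketch of how a filesystem that auto-mounts submounts might
 * drive mark_mounts_for_expiry(). The names example_expirable_mounts and
 * example_expiry_work and the 30-second period are assumptions, not code
 * from this file; the pattern is what the two-pass marking above expects -
 * a vfsmount is reaped only if it stays marked and unused across two
 * consecutive scans.
 */
static LIST_HEAD(example_expirable_mounts);
static struct work_struct example_expiry_work;

static void example_expiry_worker(void *unused)
{
	/* one pass marks idle mounts; the next pass reaps still-idle ones */
	mark_mounts_for_expiry(&example_expirable_mounts);
	schedule_delayed_work(&example_expiry_work, 30 * HZ);
}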
/*
* Some copy_from_user() implementations do not return the exact number of
* bytes remaining to copy on a fault. But copy_mount_options() requires that.
* Note that this function differs from copy_from_user() in that it will oops
* on bad values of `to', rather than returning a short copy.
*/
static long exact_copy_from_user(void *to, const void __user * from,
unsigned long n)
{
char *t = to;
const char __user *f = from;
char c;
if (!access_ok(VERIFY_READ, from, n))
return n;
while (n) {
if (__get_user(c, f)) {
memset(t, 0, n);
break;
}
*t++ = c;
f++;
n--;
}
return n;
}
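/*
 * Illustrative note: why the exact remainder matters to copy_mount_options()
 * below. If a 4096-byte copy faults after 100 readable bytes, this returns
 * exactly 3996 and the caller computes i = 100, keeping the partial page;
 * an implementation that rounded the remainder back up to 4096 would make a
 * partially-valid page look like a total fault (i == 0, hence -EFAULT).
 */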
int copy_mount_options(const void __user * data, unsigned long *where)
{
int i;
unsigned long page;
unsigned long size;
*where = 0;
if (!data)
return 0;
if (!(page = __get_free_page(GFP_KERNEL)))
return -ENOMEM;
/* We only care that *some* data at the address the user
* gave us is valid. Just in case, we'll zero
* the remainder of the page.
*/
/* copy_from_user cannot cross TASK_SIZE ! */
size = TASK_SIZE - (unsigned long)data;
if (size > PAGE_SIZE)
size = PAGE_SIZE;
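	/* Worked example (i386 with the usual TASK_SIZE of 0xC0000000, an
	 * assumption): a data pointer at 0xBFFFFFF0 leaves only 16
	 * addressable bytes, so size is clamped to 16 and the remaining
	 * 4080 bytes of the page are zeroed below. */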
i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
if (i != PAGE_SIZE)
memset((char *)page + i, 0, PAGE_SIZE - i);
*where = page;
return 0;
}
/*
* Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
* be given to the mount() call (ie: read-only, no-dev, no-suid etc).
*
* data is a (void *) that can point to any structure up to
* PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
* information (or be NULL).
*
* Pre-0.97 versions of mount() didn't have a flags word.
* When the flags word was introduced its top half was required
* to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
long do_mount(char *dev_name, char *dir_name, char *type_page,
unsigned long flags, void *data_page)
{
struct nameidata nd;
int retval = 0;
int mnt_flags = 0;
/* Discard magic */
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
/* Basic sanity checks */
if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
return -EINVAL;
if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
return -EINVAL;
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
/* Separate the per-mountpoint flags */
if (flags & MS_NOSUID)
mnt_flags |= MNT_NOSUID;
if (flags & MS_NODEV)
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);
/* ... and get the mountpoint */
retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
if (retval)
return retval;
retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
if (retval)
goto dput_out;
if (flags & MS_REMOUNT)
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
else if (flags & MS_MOVE)
retval = do_move_mount(&nd, dev_name);
else
retval = do_new_mount(&nd, type_page, flags, mnt_flags,
dev_name, data_page);
dput_out:
path_release(&nd);
return retval;
}
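/*
 * Illustrative sketch of the caller's (user-space) view of the magic-number
 * handling above: a legacy program that still ORs in MS_MGC_VAL behaves
 * exactly like a modern one, because do_mount() strips the 0xC0ED0000 top
 * half before inspecting the real flags. The "/mnt" target is an arbitrary
 * example path.
 */
#include <sys/mount.h>

int example_legacy_remount_ro(void)
{
	/* only MS_REMOUNT | MS_RDONLY survive the "Discard magic" check */
	return mount("none", "/mnt", NULL,
		     MS_MGC_VAL | MS_REMOUNT | MS_RDONLY, NULL);
}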
int copy_namespace(int flags, struct task_struct *tsk)
{
struct namespace *namespace = tsk->namespace;
struct namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
struct fs_struct *fs = tsk->fs;
struct vfsmount *p, *q;
if (!namespace)
return 0;
get_namespace(namespace);
if (!(flags & CLONE_NEWNS))
return 0;
if (!capable(CAP_SYS_ADMIN)) {
put_namespace(namespace);
return -EPERM;
}
new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
if (!new_ns)
goto out;
atomic_set(&new_ns->count, 1);
INIT_LIST_HEAD(&new_ns->list);
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
CL_EXPIRE);
	if (!new_ns->root) {
		kfree(new_ns);
		goto out;
	}
spin_lock(&vfsmount_lock);
list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
spin_unlock(&vfsmount_lock);
/*
* Second pass: switch the tsk->fs->* elements and mark new vfsmounts
* as belonging to new namespace. We have already acquired a private
* fs_struct, so tsk->fs->lock is not needed.
*/
p = namespace->root;
q = new_ns->root;
while (p) {
q->mnt_namespace = new_ns;
if (fs) {
if (p == fs->rootmnt) {
rootmnt = p;
fs->rootmnt = mntget(q);
}
if (p == fs->pwdmnt) {
pwdmnt = p;
fs->pwdmnt = mntget(q);
}
if (p == fs->altrootmnt) {
altrootmnt = p;
fs->altrootmnt = mntget(q);
}
}
p = next_mnt(p, namespace->root);
q = next_mnt(q, new_ns->root);
}
tsk->namespace = new_ns;
if (rootmnt)
mntput(rootmnt);
if (pwdmnt)
mntput(pwdmnt);
if (altrootmnt)
mntput(altrootmnt);
put_namespace(namespace);
return 0;
out:
put_namespace(namespace);
return -ENOMEM;
}
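/*
 * Illustrative sketch of what triggers copy_namespace(): a user-space
 * caller cloning with CLONE_NEWNS. The child receives a private copy of
 * the mount tree, so the tmpfs mount below is invisible to the parent.
 * CAP_SYS_ADMIN is required, exactly as checked above; the stack size and
 * mount target are arbitrary example values.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <sys/mount.h>

static char example_stack[8192];

static int example_child(void *unused)
{
	/* lands in the child's private copy of the namespace only */
	return mount("tmpfs", "/tmp", "tmpfs", 0, NULL);
}

static int example_spawn_in_new_namespace(void)
{
	return clone(example_child, example_stack + sizeof(example_stack),
		     CLONE_NEWNS | SIGCHLD, NULL);
}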
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
char __user * type, unsigned long flags,
void __user * data)
{
int retval;
unsigned long data_page;
unsigned long type_page;
unsigned long dev_page;
char *dir_page;
retval = copy_mount_options(type, &type_page);
if (retval < 0)
return retval;
dir_page = getname(dir_name);
retval = PTR_ERR(dir_page);
if (IS_ERR(dir_page))
goto out1;
	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;
	retval = copy_mount_options(data, &data_page);
if (retval < 0)
goto out3;
lock_kernel();
retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
flags, (void *)data_page);
unlock_kernel();
free_page(data_page);
out3:
free_page(dev_page);
out2:
putname(dir_page);
out1:
free_page(type_page);
return retval;
}
/*
* Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
* It can block. Requires the big lock held.
*/
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
struct dentry *dentry)
{
struct dentry *old_root;
struct vfsmount *old_rootmnt;
write_lock(&fs->lock);
old_root = fs->root;
old_rootmnt = fs->rootmnt;
fs->rootmnt = mntget(mnt);
fs->root = dget(dentry);
write_unlock(&fs->lock);
if (old_root) {
dput(old_root);
mntput(old_rootmnt);
}
}
/*
* Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
* It can block. Requires the big lock held.
*/
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
struct dentry *dentry)
{
struct dentry *old_pwd;
struct vfsmount *old_pwdmnt;
write_lock(&fs->lock);
old_pwd = fs->pwd;
old_pwdmnt = fs->pwdmnt;
fs->pwdmnt = mntget(mnt);
fs->pwd = dget(dentry);
write_unlock(&fs->lock);
if (old_pwd) {
dput(old_pwd);
mntput(old_pwdmnt);
}
}
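/*
 * Illustrative sketch of the usual calling pattern for set_fs_pwd() (and,
 * symmetrically, set_fs_root()), in the spirit of sys_chdir(): resolve the
 * path to a (vfsmount, dentry) pair first, then hand it over. The
 * permission check a real chdir performs is omitted here; note that
 * set_fs_pwd() takes its own mntget/dget references, so the caller still
 * releases its nameidata.
 */
static int example_chdir(const char __user *filename)
{
	struct nameidata nd;
	int error;

	error = __user_walk(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
	if (error)
		return error;
	set_fs_pwd(current->fs, nd.mnt, nd.dentry);
	path_release(&nd);
	return 0;
}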
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
struct task_struct *g, *p;
struct fs_struct *fs;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
fs = p->fs;
if (fs) {
atomic_inc(&fs->count);
task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
put_fs_struct(fs);
} else
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
/*
* pivot_root Semantics:
* Moves the root file system of the current process to the directory put_old,
* makes new_root as the new root file system of the current process, and sets
* root/cwd of all processes which had them on the current root to new_root.
*
* Restrictions:
* The new_root and put_old must be directories, and must not be on the
* same file system as the current process root. The put_old must be
* underneath new_root, i.e. adding a non-zero number of /.. to the string
* pointed to by put_old must yield the same directory as new_root. No other
* file system may be mounted on put_old. After all, new_root is a mountpoint.
*
* Notes:
* - we don't move root/cwd if they are not at the root (reason: if something
* cared enough to change them, it's probably wrong to force them elsewhere)
* - it's okay to pick a root that isn't the root of a file system, e.g.
* /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
* though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
* first.
*/
asmlinkage long sys_pivot_root(const char __user * new_root,
const char __user * put_old)
{
struct vfsmount *tmp;
struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
int error;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
&new_nd);
if (error)
goto out0;
error = -EINVAL;
if (!check_mnt(new_nd.mnt))
goto out1;
error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
if (error)
goto out1;
error = security_sb_pivotroot(&old_nd, &new_nd);
if (error) {
path_release(&old_nd);
goto out1;
}
	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
down(&old_nd.dentry->d_inode->i_sem);
error = -EINVAL;
if (!check_mnt(user_nd.mnt))
goto out2;
error = -ENOENT;
if (IS_DEADDIR(new_nd.dentry->d_inode))
goto out2;
if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
goto out2;
if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
goto out2;
error = -EBUSY;
if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
goto out2; /* loop, on the same file system */
error = -EINVAL;
if (user_nd.mnt->mnt_root != user_nd.dentry)
goto out2; /* not a mountpoint */
if (user_nd.mnt->mnt_parent == user_nd.mnt)
goto out2; /* not attached */
if (new_nd.mnt->mnt_root != new_nd.dentry)
goto out2; /* not a mountpoint */
if (new_nd.mnt->mnt_parent == new_nd.mnt)
goto out2; /* not attached */
tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
spin_lock(&vfsmount_lock);
if (tmp != new_nd.mnt) {
for (;;) {
if (tmp->mnt_parent == tmp)
goto out3; /* already mounted on put_old */
if (tmp->mnt_parent == new_nd.mnt)
break;
tmp = tmp->mnt_parent;
}
if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
goto out3;
} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
goto out3;
detach_mnt(new_nd.mnt, &parent_nd);
detach_mnt(user_nd.mnt, &root_parent);
attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */
attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&user_nd, &new_nd);
security_sb_post_pivotroot(&user_nd, &new_nd);
error = 0;
path_release(&root_parent);
path_release(&parent_nd);
out2:
up(&old_nd.dentry->d_inode->i_sem);
path_release(&user_nd);
path_release(&old_nd);
out1:
path_release(&new_nd);
out0:
unlock_kernel();
return error;
out3:
spin_unlock(&vfsmount_lock);
goto out2;
}
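/*
 * Illustrative sketch of the canonical user-space sequence implied by the
 * restrictions documented above: make new_root a mountpoint (the
 * mount --bind trick from the comment), keep put_old beneath it, then
 * pivot and chroot. The paths are examples, /new_root/old must already
 * exist, and glibc has no wrapper for this call, hence syscall(2).
 */
#include <unistd.h>
#include <sys/mount.h>
#include <sys/syscall.h>

static int example_switch_root(void)
{
	/* bind-mount the directory over itself so it becomes a mountpoint */
	if (mount("/new_root", "/new_root", NULL, MS_BIND, NULL) < 0)
		return -1;
	if (chdir("/new_root") < 0)
		return -1;
	/* the old root reappears on /new_root/old */
	if (syscall(SYS_pivot_root, ".", "old") < 0)
		return -1;
	return chroot(".");
}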
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
struct namespace *namespace;
struct task_struct *g, *p;
mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
if (IS_ERR(mnt))
panic("Can't create rootfs");
namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
if (!namespace)
panic("Can't allocate initial namespace");
atomic_set(&namespace->count, 1);
INIT_LIST_HEAD(&namespace->list);
init_waitqueue_head(&namespace->poll);
namespace->event = 0;
list_add(&mnt->mnt_list, &namespace->list);
namespace->root = mnt;
mnt->mnt_namespace = namespace;
init_task.namespace = namespace;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
get_namespace(namespace);
p->namespace = namespace;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}
void __init mnt_init(unsigned long mempages)
{
struct list_head *d;
unsigned int nr_hash;
int i;
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
if (!mount_hashtable)
panic("Failed to allocate mount hash table\n");
/*
* Find the power-of-two list-heads that can fit into the allocation..
* We don't guarantee that "sizeof(struct list_head)" is necessarily
* a power-of-two.
*/
nr_hash = PAGE_SIZE / sizeof(struct list_head);
hash_bits = 0;
do {
hash_bits++;
} while ((nr_hash >> hash_bits) != 0);
hash_bits--;
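	/* Worked example (assuming 4096-byte pages and a 16-byte struct
	 * list_head): nr_hash starts at 256, the loop exits with
	 * hash_bits == 9, and the decrement leaves 8, i.e. floor(log2(256)),
	 * so the recalculation below yields nr_hash = 256 again. */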
/*
* Re-calculate the actual number of entries and the mask
* from the number of bits we can fit.
*/
nr_hash = 1UL << hash_bits;
printk("Mount-cache hash table entries: %d\n", nr_hash);
/* And initialize the newly allocated array */
d = mount_hashtable;
i = nr_hash;
do {
INIT_LIST_HEAD(d);
d++;
i--;
} while (i);
sysfs_init();
init_rootfs();
init_mount_tree();
}
void __put_namespace(struct namespace *namespace)
{
struct vfsmount *root = namespace->root;
namespace->root = NULL;
spin_unlock(&vfsmount_lock);