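	/*
	 * Late do_exit() teardown: drop the task's keyrings, detach the
	 * controlling tty if we were the session leader of a now-dead
	 * group, and release the module references pinned by our exec
	 * domain and binary format handler.
	 */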
exit_keys(tsk);
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
module_put(task_thread_info(tsk)->exec_domain->module);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
/*
* This must happen late, after the PID is not
* hashed anymore:
*/
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
/*
* We can do this unlocked here. The futex code uses this flag
* just to verify whether the pi state cleanup has been done
* or not. In the worst case it loops once more.
*/
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context();
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
/* causes final put_task_struct in finish_task_switch(). */
schedule();
BUG();
/* Avoid "noreturn function does return". */
for (;;)
cpu_relax(); /* For when BUG is null */
EXPORT_SYMBOL_GPL(do_exit);
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
if (comp)
complete(comp);
do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
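/*
 * Kernel-internal exit codes keep the user-supplied status in bits
 * 8-15; the low 7 bits are reserved for a fatal signal number and bit
 * 0x80 for the core-dump flag. Hence the (error_code & 0xff) << 8
 * below: _exit(3) in userspace becomes 0x0300 here, and WEXITSTATUS()
 * recovers the 3.
 */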
asmlinkage long sys_exit(int error_code)
{
do_exit((error_code&0xff)<<8);
}
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
*/
NORET_TYPE void
do_group_exit(int exit_code)
{
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (current->signal->flags & SIGNAL_GROUP_EXIT)
exit_code = current->signal->group_exit_code;
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
}
do_exit(exit_code);
/* NOTREACHED */
}
/*
* this kills every thread in the thread group. Note that any externally
* wait4()-ing process will get the correct exit code - even if this
* thread is not the thread group leader.
*/
asmlinkage void sys_exit_group(int error_code)
{
do_group_exit((error_code & 0xff) << 8);
}
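/*
 * Decide whether a wait call with the given pid/options arguments may
 * reap @p. The pid convention matches waitpid(2): pid > 0 names one
 * child, pid == 0 means the caller's process group, pid < -1 means
 * process group -pid, and pid == -1 means any child. Returns 0 (not
 * eligible), 1 (eligible), 2 (eligible, but a group leader whose
 * thread group is not yet empty), or a negative security error.
 */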
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;
	struct pid_namespace *ns;

	ns = current->nsproxy->pid_ns;
	if (pid > 0) {
		if (task_pid_nr_ns(p, ns) != pid)
			return 0;
	} else if (!pid) {
		if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current))
			return 0;
	} else if (pid != -1) {
		if (task_pgrp_nr_ns(p, ns) != -pid)
			return 0;
	}
/*
* Do not consider detached threads that are
* not ptraced:
*/
if (p->exit_signal == -1 && !p->ptrace)
return 0;
/* Wait for all children (clone and not) if __WALL is set;
* otherwise, wait for clone children *only* if __WCLONE is
* set; otherwise, wait for non-clone children *only*. (Note:
* A "clone" child here is one that reports to its parent
* using a signal other than SIGCHLD.) */
if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
&& !(options & __WALL))
return 0;
/*
* Do not consider thread group leaders that are
* in a non-empty thread group:
*/
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}
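/*
 * WNOWAIT path: fill in the caller's siginfo and rusage buffers for a
 * child whose state we are reporting but not consuming, so that a
 * later wait call can still collect the same event.
 */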
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
int why, int status,
struct siginfo __user *infop,
struct rusage __user *rusagep)
{
int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
put_task_struct(p);
if (!retval)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval)
retval = put_user(0, &infop->si_errno);
if (!retval)
retval = put_user((short)why, &infop->si_code);
if (!retval)
retval = put_user(pid, &infop->si_pid);
if (!retval)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = put_user(status, &infop->si_status);
if (!retval)
retval = pid;
return retval;
}
/*
* Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_zombie(struct task_struct *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
unsigned long state;
int retval, status, traced;
struct pid_namespace *ns;
ns = current->nsproxy->pid_ns;
	if (unlikely(noreap)) {
		pid_t pid = task_pid_nr_ns(p, ns);
uid_t uid = p->uid;
int exit_code = p->exit_code;
int why, status;
if (unlikely(p->exit_state != EXIT_ZOMBIE))
return 0;
if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
return 0;
get_task_struct(p);
read_unlock(&tasklist_lock);
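		/*
		 * Decode the exit code for userspace: the low 7 bits hold
		 * a fatal signal number (zero for a normal exit), bit 0x80
		 * marks a core dump, bits 8-15 carry the exit status.
		 */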
if ((exit_code & 0x7f) == 0) {
why = CLD_EXITED;
status = exit_code >> 8;
} else {
why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
status = exit_code & 0x7f;
}
return wait_noreap_copyout(p, pid, uid, why,
status, infop, ru);
}
/*
* Try to move the task's state to DEAD
* only one thread is allowed to do this:
*/
state = xchg(&p->exit_state, EXIT_DEAD);
if (state != EXIT_ZOMBIE) {
BUG_ON(state != EXIT_DEAD);
return 0;
}
/* traced means p->ptrace, but not vice versa */
traced = (p->real_parent != p->parent);
if (likely(!traced)) {
struct signal_struct *psig;
struct signal_struct *sig;
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
* are in its signal_struct, as are those for the child
* processes it has previously reaped. All these
* accumulate in the parent's signal_struct c* fields.
*
* We don't bother to take a lock here to protect these
* p->signal fields, because they are only touched by
* __exit_signal, which runs with tasklist_lock
* write-locked anyway, and so is excluded here. We do
* need to protect the access to p->parent->signal fields,
* as other threads in the parent group can be right
* here reaping other children at the same time.
*/
spin_lock_irq(&p->parent->sighand->siglock);
psig = p->parent->signal;
sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
psig->cgtime =
cputime_add(psig->cgtime,
cputime_add(p->gtime,
cputime_add(sig->gtime,
sig->cgtime)));
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
p->maj_flt + sig->maj_flt + sig->cmaj_flt;
psig->cnvcsw +=
p->nvcsw + sig->nvcsw + sig->cnvcsw;
psig->cnivcsw +=
p->nivcsw + sig->nivcsw + sig->cnivcsw;
psig->cinblock +=
task_io_get_inblock(p) +
sig->inblock + sig->cinblock;
psig->coublock +=
task_io_get_oublock(p) +
sig->oublock + sig->coublock;
spin_unlock_irq(&p->parent->sighand->siglock);
}
/*
* Now we are sure this task is interesting, and no other
* thread can reap it because we set its state to EXIT_DEAD.
*/
read_unlock(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
status = (p->signal->flags & SIGNAL_GROUP_EXIT)
? p->signal->group_exit_code : p->exit_code;
if (!retval && stat_addr)
retval = put_user(status, stat_addr);
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop) {
int why;
if ((status & 0x7f) == 0) {
why = CLD_EXITED;
status >>= 8;
} else {
why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
status &= 0x7f;
}
retval = put_user((short)why, &infop->si_code);
if (!retval)
retval = put_user(status, &infop->si_status);
}
if (!retval && infop)
retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
if (!retval)
retval = task_pid_nr_ns(p, ns);
	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (p->exit_signal != -1) {
			do_notify_parent(p, p->exit_signal);
			if (p->exit_signal != -1) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
if (p != NULL)
release_task(p);
return retval;
}
/*
* Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
int noreap, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;
	pid_t pid;
if (!p->exit_code)
return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
*/
return 0;
/*
* Now we are pretty sure this task is interesting.
* Make sure it doesn't get reaped out from under us while we
* give up the lock and then examine it below. We don't want to
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
get_task_struct(p);
read_unlock(&tasklist_lock);
if (unlikely(noreap)) {
uid_t uid = p->uid;
int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
exit_code = p->exit_code;
if (unlikely(!exit_code) || unlikely(p->exit_state))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
why, exit_code,
infop, ru);
}
write_lock_irq(&tasklist_lock);
/*
* This uses xchg to be atomic with the thread resuming and setting
* it. It must also be done with the write lock held to prevent a
* race with the EXIT_ZOMBIE case.
*/
exit_code = xchg(&p->exit_code, 0);
if (unlikely(p->exit_state)) {
/*
* The task resumed and then died. Let the next iteration
* catch it in EXIT_ZOMBIE. Note that exit_code might
* already be zero here if it resumed and did _exit(0).
* The task itself is dead and won't touch exit_code again;
* other processors in this function are locked out.
*/
p->exit_code = exit_code;
exit_code = 0;
}
if (unlikely(exit_code == 0)) {
/*
* Another thread in this function got to it first, or it
* resumed, or it resumed and then died.
*/
write_unlock_irq(&tasklist_lock);
bail_ref:
put_task_struct(p);
/*
* We are returning to the wait loop without having successfully
* removed the process and having released the lock. We cannot
* continue, since the "p" task pointer is potentially stale.
*
* Return -EAGAIN, and do_wait() will restart the loop from the
* beginning. Do _not_ re-acquire the lock.
*/
return -EAGAIN;
}
/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);
write_unlock_irq(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr)
retval = put_user((exit_code << 8) | 0x7f, stat_addr);
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop)
retval = put_user((short)((p->ptrace & PT_PTRACED)
? CLD_TRAPPED : CLD_STOPPED),
&infop->si_code);
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);
BUG_ON(!retval);
return retval;
}
/*
* Handle do_wait work for one task in a live, non-stopped state.
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_continued(struct task_struct *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int retval;
pid_t pid;
uid_t uid;
struct pid_namespace *ns;
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
spin_lock_irq(&p->sighand->siglock);
/* Re-check with the lock held. */
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
spin_unlock_irq(&p->sighand->siglock);
return 0;
}
if (!noreap)
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
spin_unlock_irq(&p->sighand->siglock);
ns = current->nsproxy->pid_ns;
pid = task_pid_nr_ns(p, ns);
uid = p->uid;
get_task_struct(p);
read_unlock(&tasklist_lock);
if (!infop) {
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
put_task_struct(p);
if (!retval && stat_addr)
retval = put_user(0xffff, stat_addr);
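		/* 0xffff is the status userspace tests with WIFCONTINUED() */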
if (!retval)
retval = task_pid_nr_ns(p, ns);
} else {
retval = wait_noreap_copyout(p, pid, uid,
CLD_CONTINUED, SIGCONT,
infop, ru);
BUG_ON(retval == 0);
}
return retval;
}
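/*
 * Nonzero iff @p is being ptraced by the caller, as opposed to merely
 * being the caller's natural child while some other task attached to it.
 */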
static inline int my_ptrace_child(struct task_struct *p)
{
if (!(p->ptrace & PT_PTRACED))
return 0;
if (!(p->ptrace & PT_ATTACHED))
return 1;
/*
* This child was PTRACE_ATTACH'd. We should be seeing it only if
* we are the attacher. If we are the real parent, this is a race
* inside ptrace_attach. It is waiting for the tasklist_lock,
 * which we must take in order to switch the parent links, but it
 * has already set the flags in p->ptrace.
*/
return (p->parent != p->real_parent);
}
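/*
 * Common workhorse behind sys_wait4() and sys_waitid(): scan the
 * ->children list of every thread in our group for an eligible child
 * in a reportable state, and otherwise sleep on
 * ->signal->wait_chldexit until a child state change wakes us for
 * another pass through the repeat: loop.
 */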
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
DECLARE_WAITQUEUE(wait, current);
struct task_struct *tsk;
int flag, retval;
int allowed, denied;
	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
/*
* We will set this flag if we see any child that might later
* match our criteria, even if we are not able to reap it yet.
*/
flag = 0;
allowed = denied = 0;
current->state = TASK_INTERRUPTIBLE;
read_lock(&tasklist_lock);
tsk = current;
do {
struct task_struct *p;
int ret;

list_for_each_entry(p, &tsk->children, sibling) {
ret = eligible_child(pid, options, p);
if (!ret)
continue;
if (unlikely(ret < 0)) {
denied = ret;
continue;
}
allowed = 1;
			if (task_is_stopped_or_traced(p)) {
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 *
				 * When we hit the race with PTRACE_ATTACH, we
				 * will not report this child. But the race
				 * means it has not yet been moved to our
				 * ptrace_children list, so we need to set the
				 * flag here to avoid a spurious ECHILD when
				 * the race happens with the only child.
				 */
				flag = 1;

				if (!my_ptrace_child(p)) {
					if (task_is_traced(p))
						continue;
					if (!(options & WUNTRACED))
						continue;
				}

				retval = wait_task_stopped(p, ret == 2,
						(options & WNOWAIT), infop,
						stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock. */
					goto end;
} else if (p->exit_state == EXIT_DEAD) {
continue;
} else if (p->exit_state == EXIT_ZOMBIE) {
/*
* Eligible but we cannot release it yet:
*/
if (ret == 2)
goto check_continued;
				if (!likely(options & WEXITED))
					continue;
retval = wait_task_zombie(p,
(options & WNOWAIT), infop,
stat_addr, ru);
/* He released the lock. */
if (retval != 0)
goto end;
} else {
check_continued:
/*
* It's running now, so it might later
* exit, stop, or stop and then continue.
*/
flag = 1;
if (!unlikely(options & WCONTINUED))
continue;
retval = wait_task_continued(p,
(options & WNOWAIT), infop,
stat_addr, ru);
if (retval != 0) /* He released the lock. */
goto end;
}
}
if (!flag) {

list_for_each_entry(p, &tsk->ptrace_children,
ptrace_list) {
if (!eligible_child(pid, options, p))
continue;
flag = 1;
break;
}
}
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
BUG_ON(tsk->signal != current->signal);
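		/*
		 * Every thread in the group shares current->signal, so
		 * the next_thread() walk must never leave the group.
		 */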
} while (tsk != current);
read_unlock(&tasklist_lock);
if (flag) {
retval = 0;
if (options & WNOHANG)
goto end;
retval = -ERESTARTSYS;
if (signal_pending(current))
goto end;
schedule();
goto repeat;
}
retval = -ECHILD;
if (unlikely(denied) && !allowed)
retval = denied;
end:
current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
if (infop) {
if (retval > 0)
retval = 0;
else {
/*
* For a WNOHANG return, clear out all the fields
* we would set so the user can easily tell the
* difference.
*/
if (!retval)
retval = put_user(0, &infop->si_signo);
if (!retval)
retval = put_user(0, &infop->si_errno);
if (!retval)
retval = put_user(0, &infop->si_code);
if (!retval)
retval = put_user(0, &infop->si_pid);
if (!retval)
retval = put_user(0, &infop->si_uid);
if (!retval)
retval = put_user(0, &infop->si_status);
}
}
return retval;
}
asmlinkage long sys_waitid(int which, pid_t pid,
struct siginfo __user *infop, int options,
struct rusage __user *ru)
{
long ret;
if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
return -EINVAL;
if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
return -EINVAL;
switch (which) {
case P_ALL:
pid = -1;
break;
case P_PID:
if (pid <= 0)
return -EINVAL;
break;
case P_PGID:
if (pid <= 0)
return -EINVAL;
pid = -pid;
break;
default:
return -EINVAL;
}
ret = do_wait(pid, options, infop, NULL, ru);
/* avoid REGPARM breakage on x86: */
prevent_tail_call(ret);
return ret;
}
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru)
{
long ret;
if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);
/* avoid REGPARM breakage on x86: */
prevent_tail_call(ret);
return ret;
}
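/*
 * Illustrative userspace counterpart of the status encoding handled
 * above (a sketch, not part of this file): a child that calls _exit(3)
 * is seen by its parent as status 0x0300, one killed by SIGKILL as
 * 0x0009.
 *
 *	int status;
 *	pid_t child = fork();
 *	if (child == 0)
 *		_exit(3);
 *	waitpid(child, &status, 0);
 *	assert(WIFEXITED(status) && WEXITSTATUS(status) == 3);
 */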
#ifdef __ARCH_WANT_SYS_WAITPID
/*
* sys_waitpid() remains for compatibility. waitpid() should be
* implemented by calling sys_wait4() from libc.a.
*/
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
return sys_wait4(pid, stat_addr, options, NULL);
}
#endif