/* This is the point of no return */
current->sas_ss_sp = current->sas_ss_size = 0;
if (current_euid() == current_uid() && current_egid() == current_gid())
set_dumpable(current->mm, 1);
else
set_dumpable(current->mm, suid_dumpable);
/* Copies the binary name from after last slash */
for (i=0; (ch = *(name++)) != '\0';) {
if (ch == '/')
i = 0; /* overwrite what we wrote */
else
if (i < (sizeof(tcomm) - 1))
tcomm[i++] = ch;
}
tcomm[i] = '\0';
set_task_comm(current, tcomm);
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
*/
current->mm->task_size = TASK_SIZE;
/* install the new credentials */
if (bprm->cred->uid != current_euid() ||
bprm->cred->gid != current_egid()) {
current->pdeath_signal = 0;
} else if (file_permission(bprm->file, MAY_READ) ||
bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
set_dumpable(current->mm, suid_dumpable);
}
/*
* Flush performance counters when crossing a
* security domain:
*/
if (!get_dumpable(current->mm))
perf_event_exit_task(current);
/* An exec changes our domain. We are no longer part of the thread
group */
current->self_exec_id++;
flush_signal_handlers(current, 0);
flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);
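/*
 * A user-space aside (not part of exec.c): the basename copy above is why a
 * task's comm holds at most sizeof(tcomm) - 1 = 15 characters of the
 * executable's name. A minimal sketch observing that via prctl(); the long
 * name below is invented for illustration.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char comm[16];	/* TASK_COMM_LEN */

	/* Read back the comm the kernel derived from our binary's basename. */
	prctl(PR_GET_NAME, comm, 0, 0, 0);
	printf("comm = \"%s\"\n", comm);

	/* PR_SET_NAME truncates to 15 characters the same way. */
	prctl(PR_SET_NAME, "a-deliberately-long-name", 0, 0, 0);
	prctl(PR_GET_NAME, comm, 0, 0, 0);
	printf("comm = \"%s\"\n", comm);	/* "a-deliberately-" */
	return 0;
}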
/*
* Prepare credentials and lock ->cred_guard_mutex.
* install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
*/
int prepare_bprm_creds(struct linux_binprm *bprm)
{
if (mutex_lock_interruptible(&current->cred_guard_mutex))
return -ERESTARTNOINTR;
bprm->cred = prepare_exec_creds();
if (likely(bprm->cred))
return 0;
mutex_unlock(&current->cred_guard_mutex);
return -ENOMEM;
}
void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
mutex_unlock(&current->cred_guard_mutex);
abort_creds(bprm->cred);
}
kfree(bprm);
}
/*
* install the new credentials for this executable
*/
void install_exec_creds(struct linux_binprm *bprm)
{
security_bprm_committing_creds(bprm);
commit_creds(bprm->cred);
bprm->cred = NULL;
/*
* cred_guard_mutex must be held at least to this point to prevent
* ptrace_attach() from altering our determination of the task's
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
mutex_unlock(&current->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
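/*
 * Taken together, prepare_bprm_creds(), install_exec_creds() and free_bprm()
 * form a lock-across-functions protocol: the mutex taken in prepare is
 * dropped by exactly one of commit (success) or free (failure). A user-space
 * analogy of that shape, using pthreads; all names here are invented, not
 * kernel API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;

struct prep { int *cred; };

static int prep_creds(struct prep *p)		/* like prepare_bprm_creds() */
{
	pthread_mutex_lock(&guard);
	p->cred = malloc(sizeof(*p->cred));
	if (p->cred)
		return 0;			/* success: stay locked */
	pthread_mutex_unlock(&guard);		/* failure: unlock here */
	return -1;
}

static void commit_and_unlock(struct prep *p)	/* like install_exec_creds() */
{
	free(p->cred);
	p->cred = NULL;
	pthread_mutex_unlock(&guard);
}

static void free_prep(struct prep *p)		/* like free_bprm() */
{
	if (p->cred) {				/* commit never ran */
		free(p->cred);
		pthread_mutex_unlock(&guard);
	}
}

int main(void)
{
	struct prep p = { NULL };

	if (prep_creds(&p) == 0)
		commit_and_unlock(&p);	/* or free_prep(&p) on a failed exec */
	printf("lock released on exactly one path\n");
	return 0;
}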
/*
* determine how safe it is to execute the proposed program
* - the caller must hold current->cred_guard_mutex to protect against
* PTRACE_ATTACH
*/
int check_unsafe_exec(struct linux_binprm *bprm)
{
struct task_struct *p = current, *t;
unsigned n_fs;
int res = 0;
bprm->unsafe = tracehook_unsafe_exec(p);
n_fs = 1;
write_lock(&p->fs->lock);
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)
n_fs++;
}
rcu_read_unlock();
if (p->fs->users > n_fs) {
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
if (!p->fs->in_exec) {
p->fs->in_exec = 1;
res = 1;
}
}
write_unlock(&p->fs->lock);
return res;
}
/*
* Fill the binprm structure from the inode.
* Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
*
* This may be called multiple times for binary chains (scripts for example).
*/
int prepare_binprm(struct linux_binprm *bprm)
{
umode_t mode;
struct inode * inode = bprm->file->f_path.dentry->d_inode;
int retval;
mode = inode->i_mode;
if (bprm->file->f_op == NULL)
return -EACCES;
/* clear any previous set[ug]id data from a previous binary */
bprm->cred->euid = current_euid();
bprm->cred->egid = current_egid();
if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
/* Set-uid? */
if (mode & S_ISUID) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->euid = inode->i_uid;
}
/* Set-gid? */
/*
* If setgid is set but no group execute bit then this
* is a candidate for mandatory locking, not a setgid
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->egid = inode->i_gid;
}
}
/* fill in binprm security blob */
retval = security_bprm_set_creds(bprm);
if (retval)
return retval;
bprm->cred_prepared = 1;
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}
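/*
 * Everything a binfmt handler later matches on lives in those first 128
 * bytes. A user-space sketch of the same sniffing step, testing for the ELF
 * magic the way a loader inspects bprm->buf; the helper name is invented.
 */
#include <stdio.h>
#include <string.h>

#define BINPRM_BUF_SIZE 128

static int looks_like_elf(const char *path)
{
	char buf[BINPRM_BUF_SIZE] = { 0 };	/* zeroed like bprm->buf */
	FILE *f = fopen(path, "rb");

	if (!f)
		return 0;
	fread(buf, 1, sizeof(buf), f);	/* a short read leaves zero padding */
	fclose(f);
	return memcmp(buf, "\177ELF", 4) == 0;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("%s: %s\n", argv[1],
		       looks_like_elf(argv[1]) ? "ELF" : "not ELF");
	return 0;
}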
/*
* Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
* the first '\0' encountered.
*/
int remove_arg_zero(struct linux_binprm *bprm)
{
int ret = 0;
unsigned long offset;
char *kaddr;
struct page *page;
if (!bprm->argc)
return 0;
do {
offset = bprm->p & ~PAGE_MASK;
page = get_arg_page(bprm, bprm->p, 0);
if (!page) {
ret = -EFAULT;
goto out;
}
kaddr = kmap_atomic(page, KM_USER0);
for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++)
;
kunmap_atomic(kaddr, KM_USER0);
put_arg_page(page);
if (offset == PAGE_SIZE)
free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
} while (offset == PAGE_SIZE);
bprm->p++;
bprm->argc--;
ret = 0;
out:
return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
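/*
 * The same chop in user space: arguments are a run of '\0'-terminated
 * strings, so dropping argv[0] is just advancing past the first '\0'. A toy
 * sketch with a flat buffer standing in for the argument pages.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char args[] = "script.sh\0-x\0input.txt";	/* argc == 3 */
	char *p = args;

	p += strlen(p) + 1;		/* like remove_arg_zero(): skip argv[0] */
	printf("new argv[0] = %s\n", p);	/* prints "-x" */
	return 0;
}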
/*
 * cycle through the list of binary format handlers until one recognizes the image
*/
int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
{
unsigned int depth = bprm->recursion_depth;
int try,retval;
struct linux_binfmt *fmt;
retval = security_bprm_check(bprm);
if (retval)
return retval;
/* kernel module loader fixup */
/* so we don't try to run modprobe in kernel space. */
set_fs(USER_DS);
retval = audit_bprm(bprm);
if (retval)
return retval;
retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
list_for_each_entry(fmt, &formats, lh) {
int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
if (!fn)
continue;
if (!try_module_get(fmt->module))
continue;
read_unlock(&binfmt_lock);
retval = fn(bprm, regs);
/*
* Restore the depth counter to its starting value
* in this call, so we don't have to rely on every
* load_binary function to restore it on return.
*/
bprm->recursion_depth = depth;
if (retval >= 0) {
if (depth == 0)
tracehook_report_exec(fmt, bprm, regs);
put_binfmt(fmt);
allow_write_access(bprm->file);
if (bprm->file)
fput(bprm->file);
bprm->file = NULL;
current->did_exec = 1;
return retval;
}
read_lock(&binfmt_lock);
put_binfmt(fmt);
if (retval != -ENOEXEC || bprm->mm == NULL)
break;
if (!bprm->file) {
read_unlock(&binfmt_lock);
return retval;
}
}
read_unlock(&binfmt_lock);
if (retval != -ENOEXEC || bprm->mm == NULL) {
break;
#ifdef CONFIG_MODULES
} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
if (printable(bprm->buf[0]) &&
printable(bprm->buf[1]) &&
printable(bprm->buf[2]) &&
printable(bprm->buf[3]))
break; /* -ENOEXEC */
request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
}
}
return retval;
}
EXPORT_SYMBOL(search_binary_handler);
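/*
 * The loop above walks the &formats list, which modules join through
 * register_binfmt(); the request_module("binfmt-%04x", ...) call further up
 * is what lets an unknown magic pull such a module in on demand. A minimal
 * sketch of a handler module for this kernel era (load_binary still takes
 * pt_regs); the "XY" magic and every name here are invented.
 */
#include <linux/module.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>

static int load_xy_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	/* Sniff the 128-byte header prepare_binprm() read for us. */
	if (bprm->buf[0] != 'X' || bprm->buf[1] != 'Y')
		return -ENOEXEC;	/* decline; the next handler tries */

	/* ... flush the old mm, map the image, set up regs ... */
	return -ENOEXEC;		/* sketch only: always decline */
}

static struct linux_binfmt xy_format = {
	.module      = THIS_MODULE,
	.load_binary = load_xy_binary,
};

static int __init xy_init(void)
{
	return register_binfmt(&xy_format);
}

static void __exit xy_exit(void)
{
	unregister_binfmt(&xy_format);
}

module_init(xy_init);
module_exit(xy_exit);
MODULE_LICENSE("GPL");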
/*
* sys_execve() executes a new program.
*/
int do_execve(char * filename,
char __user *__user *argv,
char __user *__user *envp,
struct pt_regs * regs)
{
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
bool clear_in_exec;
int retval;
retval = unshare_files(&displaced);
if (retval)
goto out_ret;
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm)
goto out_files;
retval = prepare_bprm_creds(bprm);
if (retval)
goto out_free;
retval = check_unsafe_exec(bprm);
if (retval < 0)
goto out_free;
clear_in_exec = retval;
current->in_execve = 1;
file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
goto out_unmark;
sched_exec();
bprm->file = file;
bprm->filename = filename;
bprm->interp = filename;
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
bprm->argc = count(argv, MAX_ARG_STRINGS);
if ((retval = bprm->argc) < 0)
goto out;
bprm->envc = count(envp, MAX_ARG_STRINGS);
if ((retval = bprm->envc) < 0)
goto out;
retval = prepare_binprm(bprm);
if (retval < 0)
goto out;
retval = copy_strings_kernel(1, &bprm->filename, bprm);
if (retval < 0)
goto out;
bprm->exec = bprm->p;
retval = copy_strings(bprm->envc, envp, bprm);
if (retval < 0)
goto out;
retval = copy_strings(bprm->argc, argv, bprm);
if (retval < 0)
goto out;
retval = search_binary_handler(bprm, regs);
if (retval < 0)
goto out;
current->stack_start = current->mm->start_stack;
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
free_bprm(bprm);
if (displaced)
put_files_struct(displaced);
return retval;
out:
if (bprm->mm)
mmput(bprm->mm);
out_file:
if (bprm->file) {
allow_write_access(bprm->file);
fput(bprm->file);
}
out_unmark:
if (clear_in_exec)
current->fs->in_exec = 0;
current->in_execve = 0;
out_free:
free_bprm(bprm);
out_files:
if (displaced)
reset_files_struct(displaced);
out_ret:
return retval;
}
void set_binfmt(struct linux_binfmt *new)
{
struct mm_struct *mm = current->mm;
if (mm->binfmt)
module_put(mm->binfmt->module);
mm->binfmt = new;
if (new)
__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/* format_corename will inspect the pattern parameter, and output a
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
static int format_corename(char *corename, long signr)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
int ispipe = (*pat_ptr == '|');
char *out_ptr = corename;
char *const out_end = corename + CORENAME_MAX_SIZE;
int rc;
int pid_in_pattern = 0;
/* Repeat as long as we have more pattern to process and more output
space */
while (*pat_ptr) {
if (*pat_ptr != '%') {
if (out_ptr == out_end)
goto out;
*out_ptr++ = *pat_ptr++;
} else {
switch (*++pat_ptr) {
case 0:
goto out;
/* Double percent, output one percent */
case '%':
if (out_ptr == out_end)
goto out;
*out_ptr++ = '%';
break;
/* pid */
case 'p':
pid_in_pattern = 1;
rc = snprintf(out_ptr, out_end - out_ptr,
"%d", task_tgid_vnr(current));
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
/* uid */
case 'u':
rc = snprintf(out_ptr, out_end - out_ptr,
"%d", cred->uid);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
/* gid */
case 'g':
rc = snprintf(out_ptr, out_end - out_ptr,
"%d", cred->gid);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
/* signal that caused the coredump */
case 's':
rc = snprintf(out_ptr, out_end - out_ptr,
"%ld", signr);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
/* UNIX time of coredump */
case 't': {
struct timeval tv;
do_gettimeofday(&tv);
rc = snprintf(out_ptr, out_end - out_ptr,
"%lu", tv.tv_sec);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
}
/* hostname */
case 'h':
down_read(&uts_sem);
rc = snprintf(out_ptr, out_end - out_ptr,
"%s", utsname()->nodename);
up_read(&uts_sem);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
/* executable */
case 'e':
rc = snprintf(out_ptr, out_end - out_ptr,
"%s", current->comm);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;

/* core limit size */
case 'c':
rc = snprintf(out_ptr, out_end - out_ptr,
"%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
break;
default:
break;
}
++pat_ptr;
}
}
/* Backward compatibility with core_uses_pid:
*
* If core_pattern does not include a %p (as is the default)
* and core_uses_pid is set, then .%pid will be appended to
* the filename. Do not do this for piped commands. */
if (!ispipe && !pid_in_pattern && core_uses_pid) {
".%d", task_tgid_vnr(current));
if (rc > out_end - out_ptr)
goto out;
out_ptr += rc;
}
out:
return ispipe;
}
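/*
 * Every specifier above relies on the same bounded-append idiom: snprintf()
 * into the space that is left, then treat a return value larger than that
 * space as overflow (snprintf reports the would-be length, not what it
 * wrote). A self-contained user-space sketch of the idiom; the pattern
 * fields are invented.
 */
#include <stdio.h>

int main(void)
{
	char corename[32];
	char *out_ptr = corename;
	char *const out_end = corename + sizeof(corename);
	int rc;

	rc = snprintf(out_ptr, out_end - out_ptr, "core.%d", 1234);
	if (rc > out_end - out_ptr)
		return 1;		/* truncated: bail like 'goto out' */
	out_ptr += rc;

	rc = snprintf(out_ptr, out_end - out_ptr, ".%s", "demo");
	if (rc > out_end - out_ptr)
		return 1;
	out_ptr += rc;

	printf("%s\n", corename);	/* core.1234.demo */
	return 0;
}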
static int zap_process(struct task_struct *start)
{
struct task_struct *t;
int nr = 0;
start->signal->flags = SIGNAL_GROUP_EXIT;
start->signal->group_stop_count = 0;
t = start;
do {
if (t != current && t->mm) {
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
nr++;
}
} while_each_thread(start, t);
return nr;
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
struct core_state *core_state, int exit_code)
{
struct task_struct *g, *p;
unsigned long flags;
int nr = -EAGAIN;
spin_lock_irq(&tsk->sighand->siglock);
if (!signal_group_exit(tsk->signal)) {
mm->core_state = core_state;
tsk->signal->group_exit_code = exit_code;
nr = zap_process(tsk);
}
spin_unlock_irq(&tsk->sighand->siglock);
if (unlikely(nr < 0))
return nr;
if (atomic_read(&mm->mm_users) == nr + 1)
goto done;
/*
* We should find and kill all tasks which use this mm, and we should
* count them correctly into ->nr_threads. We don't take tasklist
* lock, but this is safe wrt:
*
* fork:
* None of sub-threads can fork after zap_process(leader). All
* processes which were created before this point should be
* visible to zap_threads() because copy_process() adds the new
* process to the tail of init_task.tasks list, and lock/unlock
* of ->siglock provides a memory barrier.
*
* do_exit:
* The caller holds mm->mmap_sem. This means that the task which
* uses this mm can't pass exit_mm(), so it can't exit or clear
* its ->mm.
*
* de_thread:
* It does list_replace_rcu(&leader->tasks, ¤t->tasks),
* we must see either old or new leader, this does not matter.
* However, it can change p->sighand, so lock_task_sighand(p)
* must be used. Since p->mm != NULL and we hold ->mmap_sem
* it can't fail.
*
* Note also that "g" can be the old leader with ->mm == NULL
* and already unhashed and thus removed from ->thread_group.
* This is OK, __unhash_process()->list_del_rcu() does not
* clear the ->next pointer, we will find the new leader via
* next_thread().
*/
rcu_read_lock();
for_each_process(g) {
if (g == tsk->group_leader)
continue;
if (g->flags & PF_KTHREAD)
continue;
p = g;
do {
if (p->mm) {
if (unlikely(p->mm == mm)) {
lock_task_sighand(p, &flags);
nr += zap_process(p);
unlock_task_sighand(p, &flags);
}
break;
}
} while_each_thread(g, p);
}
rcu_read_unlock();
done:
atomic_set(&core_state->nr_threads, nr);
return nr;
}
static int coredump_wait(int exit_code, struct core_state *core_state)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
struct completion *vfork_done;
int core_waiters;
init_completion(&core_state->startup);
core_state->dumper.task = tsk;
core_state->dumper.next = NULL;
core_waiters = zap_threads(tsk, mm, core_state, exit_code);
up_write(&mm->mmap_sem);
if (unlikely(core_waiters < 0))
goto fail;
/*
* Make sure nobody is waiting for us to release the VM,
* otherwise we can deadlock when we wait on each other
*/
vfork_done = tsk->vfork_done;
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}
if (core_waiters)
wait_for_completion(&core_state->startup);
fail:
return core_waiters;
}
static void coredump_finish(struct mm_struct *mm)
{
struct core_thread *curr, *next;
struct task_struct *task;
next = mm->core_state->dumper.next;
while ((curr = next) != NULL) {
next = curr->next;
task = curr->task;
/*
* see exit_mm(), curr->task must not see
* ->task == NULL before we read ->next.
*/
smp_mb();
curr->task = NULL;
wake_up_process(task);
}
mm->core_state = NULL;
}
/*
* set_dumpable converts traditional three-value dumpable to two flags and
* stores them into mm->flags. It modifies lower two bits of mm->flags, but
* these bits are not changed atomically. So get_dumpable can observe the
 * intermediate state. To avoid unexpected behavior, get_dumpable returns
 * either the old or the new dumpable value by paying attention to the
 * order in which the bits are modified.
*
* dumpable | mm->flags (binary)
* old new | initial interim final
* ---------+-----------------------
* 0 1 | 00 01 01
* 0 2 | 00 10(*) 11
* 1 0 | 01 00 00
* 1 2 | 01 11 11
* 2 0 | 11 10(*) 00
* 2 1 | 11 11 01
*
* (*) get_dumpable regards interim value of 10 as 11.
*/
void set_dumpable(struct mm_struct *mm, int value)
{
switch (value) {
case 0:
clear_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
case 1:
set_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
case 2:
set_bit(MMF_DUMP_SECURELY, &mm->flags);
smp_wmb();
set_bit(MMF_DUMPABLE, &mm->flags);
break;
}
}
int get_dumpable(struct mm_struct *mm)
{
int ret;
ret = mm->flags & 0x3;
return (ret >= 2) ? 2 : ret;
}
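/*
 * The value maintained here is the same one user space reads with prctl();
 * a process can drop to 0 or return to 1 itself, while 2 ("dump as root")
 * only comes from the suid_dumpable sysctl, since prctl(PR_SET_DUMPABLE)
 * rejects anything but 0 and 1. A small demonstration:
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	printf("dumpable = %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));

	prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);	/* opt out of core dumps */
	printf("dumpable = %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));

	prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);	/* and back */
	printf("dumpable = %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));
	return 0;
}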
static void wait_for_dump_helpers(struct file *file)
{
struct pipe_inode_info *pipe;
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
pipe->readers++;
pipe->writers--;
while ((pipe->readers > 1) && (!signal_pending(current))) {
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
pipe_wait(pipe);
}
pipe->readers--;
pipe->writers++;
pipe_unlock(pipe);
}
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
struct core_state core_state;
char corename[CORENAME_MAX_SIZE + 1];
struct mm_struct *mm = current->mm;
struct linux_binfmt * binfmt;
struct inode * inode;
const struct cred *old_cred;
struct cred *cred;
int retval = 0;
int flag = 0;
int ispipe = 0;

char **helper_argv = NULL;
int helper_argc = 0;
int dump_count = 0;
static atomic_t core_dump_count = ATOMIC_INIT(0);
struct coredump_params cprm = {
.signr = signr,
.regs = regs,
.limit = current->signal->rlim[RLIMIT_CORE].rlim_cur,
};
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
cred = prepare_creds();
if (!cred) {
retval = -ENOMEM;
goto fail;
}
down_write(&mm->mmap_sem);
/*
 * If another thread got here first, or we are not dumpable, bail out.
 */
if (mm->core_state || !get_dumpable(mm)) {
up_write(&mm->mmap_sem);
put_cred(cred);
goto fail;
}
/*
* We cannot trust fsuid as being the "true" uid of the
* process nor do we know its entire history. We only know it
* was tainted so we dump it as root in mode 2.
*/
if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
flag = O_EXCL;		/* Stop rewrite attacks */
cred->fsuid = 0;	/* Dump root private */
}
retval = coredump_wait(exit_code, &core_state);
if (retval < 0) {
put_cred(cred);
goto fail;
}
old_cred = override_creds(cred);
/*
* Clear any false indication of pending signals that might
* be seen by the filesystem code called to write the core file.
*/
clear_thread_flag(TIF_SIGPENDING);
/*
* lock_kernel() because format_corename() is controlled by sysctl, which
* uses lock_kernel()
*/
lock_kernel();
ispipe = format_corename(corename, signr);
unlock_kernel();
if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
goto fail_unlock;
if (ispipe) {
if (cprm.limit == 0) {
/*
* Normally core limits are irrelevant to pipes, since
* we're not writing to the file system, but we use
 * cprm.limit of 0 here as a special value. Any
* non-zero limit gets set to RLIM_INFINITY below, but
* a limit of 0 skips the dump. This is a consistent
* way to catch recursive crashes. We can still crash
* if the core_pattern binary sets RLIM_CORE = !0
* but it runs as root, and can do lots of stupid things
* Note that we use task_tgid_vnr here to grab the pid
* of the process group leader. That way we get the
* right pid if a thread in a multi-threaded
* core_pattern process dies.
*/
printk(KERN_WARNING
"Process %d(%s) has RLIMIT_CORE set to 0\n",
task_tgid_vnr(current), current->comm);
printk(KERN_WARNING "Aborting core\n");
goto fail_unlock;
}
dump_count = atomic_inc_return(&core_dump_count);
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
printk(KERN_WARNING "Skipping core dump\n");
goto fail_dropcount;
}

helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
if (!helper_argv) {
printk(KERN_WARNING "%s failed to allocate memory\n",
__func__);
goto fail_dropcount;
}
cprm.limit = RLIM_INFINITY;
/* SIGPIPE can happen, but it's just never processed */
if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
&cprm.file)) {
printk(KERN_INFO "Core dump to %s pipe failed\n",
corename);
goto fail_dropcount;
}
} else
cprm.file = filp_open(corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
if (IS_ERR(cprm.file))
goto fail_dropcount;
inode = cprm.file->f_path.dentry->d_inode;
if (inode->i_nlink > 1)
goto close_fail; /* multiple links - don't dump */
if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
goto close_fail;
/* AK: actually i see no reason to not allow this for named pipes etc.,
but keep the previous behaviour for now. */
if (!ispipe && !S_ISREG(inode->i_mode))
goto close_fail;
/*
 * Don't allow local users to get cute and trick others into dumping
 * core into their pre-created files:
*/
if (inode->i_uid != current_fsuid())
goto close_fail;
if (!cprm.file->f_op || !cprm.file->f_op->write)
goto close_fail;
if (!ispipe &&
do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
goto close_fail;
retval = binfmt->core_dump(&cprm);
if (retval)
current->signal->group_exit_code |= 0x80;
close_fail:
if (ispipe && core_pipe_limit)
wait_for_dump_helpers(cprm.file);
filp_close(cprm.file, NULL);
fail_dropcount:
if (dump_count)
atomic_dec(&core_dump_count);

fail_unlock:
if (helper_argv)
argv_free(helper_argv);
revert_creds(old_cred);
put_cred(cred);
coredump_finish(mm);
fail:
return;
}
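/*
 * When core_pattern begins with '|', do_coredump() above execs the rest of
 * the pattern as a helper and streams the dump to its stdin through
 * call_usermodehelper_pipe(). A minimal user-space helper along those lines;
 * the install path and pattern are illustrative only, e.g.
 *   kernel.core_pattern = |/usr/local/sbin/core-helper %p %s
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	char buf[4096];
	size_t n;
	FILE *out;

	/* argv[1] = pid, argv[2] = signal, per the %p %s pattern above. */
	snprintf(path, sizeof(path), "/var/tmp/core.%s.%s",
		 argc > 1 ? argv[1] : "unknown",
		 argc > 2 ? argv[2] : "unknown");

	out = fopen(path, "w");
	if (!out)
		return 1;

	/* The kernel writes the core image to our stdin. */
	while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
		fwrite(buf, 1, n, out);

	fclose(out);
	return 0;
}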