/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
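
/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * above are requested via the set_mempolicy(2) and mbind(2) system calls,
 * which end up in the do_set_mempolicy() and mbind_range() paths below.
 * The node numbers are assumptions for the example.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	// process policy: interleave across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *	// VMA policy: bind one mapping to node 0 only
 *	unsigned long bind = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &bind, sizeof(bind) * 8, 0);
 */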
    
    /* Notebook:
       fix mmap readahead to honour policy and enable policy for any page cache
       object
       statistics for bigpages
       global policy for page cache? currently it uses process policy. Requires
       first item above.
       handle mremap for shared memory (currently ignored for the policy)
       grows down?
       make bind policy root only? It can trigger oom much faster and the
       kernel is not always grateful with that.
    */
    
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
    static struct kmem_cache *policy_cache;
    static struct kmem_cache *sn_cache;
    
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

    /*
     * run-time system-wide default policy => local allocation
     */
    
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};
    
    
    static struct mempolicy preferred_node_policy[MAX_NUMNODES];
    
    static struct mempolicy *get_task_policy(struct task_struct *p)
    {
    	struct mempolicy *pol = p->mempolicy;
    
	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

    static const struct mempolicy_operations {
    	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
    
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step clears all the disallowed nodes. This way we avoid having
	 * no usable node during the rebind.
	 * If we have a lock to protect task->mempolicy in the read side,
	 * we rebind directly.
	 *
	 * step:
	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
    
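/*
 * Illustrative sketch (node numbers are assumptions): how the two-step
 * rebind keeps a usable nodemask at all times. Suppose a task's MPOL_BIND
 * policy uses nodes {0,1} and its cpuset is moved to nodes {2,3}:
 *
 *	MPOL_REBIND_STEP1: grow the mask first, so a lockless reader
 *	always sees at least one allowed node: {0,1} | {2,3} = {0,1,2,3}
 *	MPOL_REBIND_STEP2: then shrink it to the new set: {2,3}
 *
 * A reader racing with the update sees {0,1}, {0,1,2,3} or {2,3},
 * never an empty mask.
 */
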
/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}
    
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
    
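/*
 * Worked example (illustrative masks): with a relative mask *orig = {0,3}
 * and an allowed set *rel = {6,7} (weight 2), nodes_fold() wraps *orig
 * into the interval [0,2) giving {0,1}, and nodes_onto() then maps bit i
 * to the i-th set bit of *rel, so *ret = {6,7}.
 */
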
    static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
    {
    	if (nodes_empty(*nodes))
    		return -EINVAL;
    	pol->v.nodes = *nodes;
    	return 0;
    }
    
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}
    
    static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
    {
    	if (!is_valid_nodemask(nodes))
    		return -EINVAL;
    	pol->v.nodes = *nodes;
    	return 0;
    }
    
    
    /*
     * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
     * any, for the new policy.  mpol_new() has already validated the nodes
     * parameter with respect to the policy mode and flags.  But, we need to
     * handle an empty nodemask with MPOL_PREFERRED here.
     *
     * Must be called holding task's alloc_lock to protect task's mems_allowed
     * and mempolicy.  May also be called holding the mmap_semaphore for write.
     */
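
/*
 * Typical call sequence (sketch; it mirrors do_set_mempolicy() below):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *	task_lock(current);			(takes the alloc_lock)
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */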
    
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
    
/*
 * This function just creates a new policy, does some basic checks, and
 * performs simple initialization. You must invoke mpol_set_nodemask()
 * to set the nodes.
 */
    
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

    /* Slow path of a mpol destructor. */
    void __mpol_put(struct mempolicy *p)
    {
    	if (!atomic_dec_and_test(&p->refcnt))
    		return;
    	kmem_cache_free(policy_cache, p);
    }
    
    
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 * 	MPOL_REBIND_ONCE  - do the rebind work at once
 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
    	nodemask_t tmp;
    
    	if (pol->flags & MPOL_F_STATIC_NODES)
    		nodes_and(tmp, pol->w.user_nodemask, *nodes);
    	else if (pol->flags & MPOL_F_RELATIVE_NODES)
    		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
    	else {
    
    		/*
    		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
    		 * result
    		 */
    		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
    			nodes_remap(tmp, pol->v.nodes,
    					pol->w.cpuset_mems_allowed, *nodes);
    			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
    		} else if (step == MPOL_REBIND_STEP2) {
    			tmp = pol->w.cpuset_mems_allowed;
    			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

    	if (nodes_empty(tmp))
    		tmp = *nodes;
    
    	if (step == MPOL_REBIND_STEP1)
    		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
    	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
    		pol->v.nodes = tmp;
    	else
    		BUG();
    
    
    	if (!node_isset(current->il_next, tmp)) {
    		current->il_next = next_node(current->il_next, tmp);
    		if (current->il_next >= MAX_NUMNODES)
    			current->il_next = first_node(tmp);
    		if (current->il_next >= MAX_NUMNODES)
    			current->il_next = numa_node_id();
    	}
    }
    
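/*
 * Worked example (illustrative masks): an MPOL_INTERLEAVE policy created
 * with user_nodemask = {0,1} and then migrated to a cpuset allowing {2,3}.
 * With MPOL_F_STATIC_NODES the intersection {0,1} & {2,3} is empty, so
 * tmp falls back to the new set {2,3}; with MPOL_F_RELATIVE_NODES {0,1}
 * is remapped relative to the new set, giving {2,3}; with neither flag
 * the nodes are remapped positionally by nodes_remap(), also {2,3}.
 */
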
static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes. This way we avoid having no usable node during
 * the rebind.
 * If we have a lock to protect task->mempolicy in the read side, we
 * rebind directly.
 *
 * step:
 * 	MPOL_REBIND_ONCE  - do the rebind work at once
 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
    static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
    				enum mpol_rebind_step step)
    
    {
    	if (!pol)
    		return;
    
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

    	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
    		return;
    
    	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
    		BUG();
    
    	if (step == MPOL_REBIND_STEP1)
    		pol->flags |= MPOL_F_REBINDING;
    	else if (step == MPOL_REBIND_STEP2)
    		pol->flags &= ~MPOL_F_REBINDING;
    	else if (step >= MPOL_REBIND_NSTEP)
    		BUG();
    
    	mpol_ops[pol->mode].rebind(pol, newmask, step);
    
    }
    
/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}
    
    /*
     * Rebind each vma in mm to new nodemask.
     *
     * Call holding a reference to mm.  Takes mm->mmap_sem during call.
     */
    
    void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
    {
    	struct vm_area_struct *vma;
    
	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
    }
    
    
    static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
    	[MPOL_DEFAULT] = {
    		.rebind = mpol_rebind_default,
    	},
    	[MPOL_INTERLEAVE] = {
    		.create = mpol_new_interleave,
    		.rebind = mpol_rebind_nodemask,
    	},
    	[MPOL_PREFERRED] = {
    		.create = mpol_new_preferred,
    		.rebind = mpol_rebind_preferred,
    	},
    	[MPOL_BIND] = {
    		.create = mpol_new_bind,
    		.rebind = mpol_rebind_nodemask,
    	},
    };
    
    
    static void migrate_page_add(struct page *page, struct list_head *pagelist,
    				unsigned long flags);
    
    /*
     * Scan through pages checking if pages follow certain conditions,
     * and move them to the pagelist if they do.
     */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
}

static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
    
    
static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}
    
    
static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
    
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist,
 * which is passed via @private.
 */
    
static struct vm_area_struct *
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
    	if (!first)
    		return ERR_PTR(-EFAULT);
    	prev = NULL;
    	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
    
    		unsigned long endvma = vma->vm_end;
    
    		if (endvma > end)
    			endvma = end;
    		if (vma->vm_start > start)
    			start = vma->vm_start;
    
    
    		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
    			if (!vma->vm_next && vma->vm_end < end)
    				return ERR_PTR(-EFAULT);
    			if (prev && prev->vm_end < vma->vm_start)
    				return ERR_PTR(-EFAULT);
    		}
    
    
    		if (flags & MPOL_MF_LAZY) {
    			change_prot_numa(vma, start, endvma);
    			goto next;
    		}
    
		if ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = queue_pages_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
    	}
    	return first;
    }
    
    
    /*
     * Apply policy to a single VMA
     * This must be called with the mmap_sem held for writing.
     */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

    /* Step 2: apply policy to a range and do splits. */
    
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
    
    
    /*
     * Update task->flags PF_MEMPOLICY bit: set iff non-default
     * mempolicy.  Allows more rapid checking of this (combined perhaps
     * with other PF_* flag bits) on memory allocation hot code paths.
     *
     * If called from outside this file, the task 'p' should -only- be
     * a newly forked child not yet visible on the task list, because
     * manipulating the task flags of a visible task is not safe.
     *
     * The above limitation is why this routine has the funny name
     * mpol_fix_fork_child_flag().
     *
     * It is also safe to call this with a task pointer of current,
     * which the static wrapper mpol_set_task_struct_flag() does,
     * for use within this file.
     */
    
    void mpol_fix_fork_child_flag(struct task_struct *p)
    {
    	if (p->mempolicy)
    		p->flags |= PF_MEMPOLICY;
    	else
    		p->flags &= ~PF_MEMPOLICY;
    }
    
    static void mpol_set_task_struct_flag(void)
    {
    	mpol_fix_fork_child_flag(current);
    }
    
    
    /* Set the process memory policy */
    
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}
    
    static int lookup_node(struct mm_struct *mm, unsigned long addr)
    {
    	struct page *p;
    	int err;
    
    	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
    	if (err >= 0) {
    		err = page_to_nid(p);
    		put_page(p);
    	}
    	return err;
    }
    
    /* Retrieve NUMA policy */
    
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
    {
	int err;
    	struct mm_struct *mm = current->mm;
    	struct vm_area_struct *vma = NULL;
    	struct mempolicy *pol = current->mempolicy;
    
    
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;
    
    
	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

    	if (flags & MPOL_F_ADDR) {
    
    		/*
    		 * Do NOT fall back to task policy if the
    		 * vma/shared policy at addr is NULL.  We
    		 * want to return MPOL_DEFAULT in this case.
    		 */
    
    		down_read(&mm->mmap_sem);
    		vma = find_vma_intersection(mm, addr, addr+1);
    		if (!vma) {
    			up_read(&mm->mmap_sem);
    			return -EFAULT;
    		}
    		if (vma->vm_ops && vma->vm_ops->get_policy)
    			pol = vma->vm_ops->get_policy(vma, addr);
    		else
    			pol = vma->vm_policy;
    	} else if (addr)
    		return -EINVAL;
    
    	if (!pol)
		pol = &default_policy;	/* indicates default behavior */
    
	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

    	if (vma) {
    		up_read(&current->mm->mmap_sem);
    		vma = NULL;
    	}
    
	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}