Skip to content
Snippets Groups Projects
memory.c 92.6 KiB
Newer Older
  • Learn to ignore specific revisions
  • Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     *  linux/mm/memory.c
     *
     *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
     */
    
    /*
     * demand-loading started 01.12.91 - seems it is high on the list of
     * things wanted, and it should be easy to implement. - Linus
     */
    
    /*
     * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
     * pages started 02.12.91, seems to work. - Linus.
     *
     * Tested sharing by executing about 30 /bin/sh: under the old kernel it
     * would have taken more than the 6M I have free, but it worked well as
     * far as I could see.
     *
     * Also corrected some "invalidate()"s - I wasn't doing enough of them.
     */
    
    /*
     * Real VM (paging to/from disk) started 18.12.91. Much more work and
     * thought has to go into this. Oh, well..
     * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
     *		Found it. Everything seems to work now.
     * 20.12.91  -  Ok, making the swap-device changeable like the root.
     */
    
    /*
     * 05.04.94  -  Multi-page memory management added for v1.1.
     * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
     *
     * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
     *		(Gerhard.Wichert@pdb.siemens.de)
     *
     * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
     */
    
    #include <linux/kernel_stat.h>
    #include <linux/mm.h>
    #include <linux/hugetlb.h>
    #include <linux/mman.h>
    #include <linux/swap.h>
    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    
    #include <linux/ksm.h>
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    #include <linux/rmap.h>
    #include <linux/module.h>
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    #include <linux/init.h>
    
    #include <linux/writeback.h>
    
    #include <linux/memcontrol.h>
    
    Andrea Arcangeli's avatar
    Andrea Arcangeli committed
    #include <linux/mmu_notifier.h>
    
    #include <linux/kallsyms.h>
    #include <linux/swapops.h>
    #include <linux/elf.h>
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    #include <asm/io.h>
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    #include <asm/pgalloc.h>
    #include <asm/uaccess.h>
    #include <asm/tlb.h>
    #include <asm/tlbflush.h>
    #include <asm/pgtable.h>
    
    
    #ifndef CONFIG_NEED_MULTIPLE_NODES
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /* use the per-pgdat data instead for discontigmem - mbligh */
    unsigned long max_mapnr;
    struct page *mem_map;
    
    EXPORT_SYMBOL(max_mapnr);
    EXPORT_SYMBOL(mem_map);
    #endif
    
    unsigned long num_physpages;
    /*
     * A number of key systems in x86 including ioremap() rely on the assumption
     * that high_memory defines the upper bound on direct map memory, then end
     * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
     * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
     * and ZONE_HIGHMEM.
     */
    void * high_memory;
    
    EXPORT_SYMBOL(num_physpages);
    EXPORT_SYMBOL(high_memory);
    
    
    /*
     * Randomize the address space (stacks, mmaps, brk, etc.).
     *
     * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
     *   as ancient (libc5 based) binaries can segfault. )
     */
    int randomize_va_space __read_mostly =
    #ifdef CONFIG_COMPAT_BRK
    					1;
    #else
    					2;
    #endif
    
    
    static int __init disable_randmaps(char *s)
    {
    	randomize_va_space = 0;
    
    unsigned long zero_pfn __read_mostly;
    
    unsigned long highest_memmap_pfn __read_mostly;
    
    Hugh Dickins's avatar
    Hugh Dickins committed
    
    /*
     * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
     */
/* Cache the pfn of the global zero page once ZERO_PAGE is usable. */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     * If a p?d_bad entry is found while walking page tables, report
     * the error, before resetting entry to p?d_none.  Usually (but
     * very seldom) called out from the p?d_none_or_clear_bad macros.
     */
    
/* Report a corrupt pgd entry, then clear it so the walk can proceed. */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}
    
/* Report a corrupt pud entry, then clear it so the walk can proceed. */
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
    
/* Report a corrupt pmd entry, then clear it so the walk can proceed. */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
    
    /*
     * Note: this doesn't free the actual pages themselves. That
     * has been handled earlier when unmapping all the memory regions.
     */
    
    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
    			   unsigned long addr)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	pgtable_t token = pmd_pgtable(*pmd);
    
    	pmd_clear(pmd);
    
    	pte_free_tlb(tlb, token, addr);
    
    	tlb->mm->nr_ptes--;
    
    static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
    				unsigned long addr, unsigned long end,
    				unsigned long floor, unsigned long ceiling)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pmd_t *pmd;
    	unsigned long next;
    
    	unsigned long start;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pmd = pmd_offset(pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    		if (pmd_none_or_clear_bad(pmd))
    			continue;
    
    		free_pte_range(tlb, pmd, addr);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pmd++, addr = next, addr != end);
    
    
    	start &= PUD_MASK;
    	if (start < floor)
    		return;
    	if (ceiling) {
    		ceiling &= PUD_MASK;
    		if (!ceiling)
    			return;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	if (end - 1 > ceiling - 1)
    		return;
    
    	pmd = pmd_offset(pud, start);
    	pud_clear(pud);
    
    	pmd_free_tlb(tlb, pmd, start);
    
    static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
    				unsigned long addr, unsigned long end,
    				unsigned long floor, unsigned long ceiling)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pud_t *pud;
    	unsigned long next;
    
    	unsigned long start;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pud = pud_offset(pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    		if (pud_none_or_clear_bad(pud))
    			continue;
    
    		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pud++, addr = next, addr != end);
    
    
    	start &= PGDIR_MASK;
    	if (start < floor)
    		return;
    	if (ceiling) {
    		ceiling &= PGDIR_MASK;
    		if (!ceiling)
    			return;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	if (end - 1 > ceiling - 1)
    		return;
    
    	pud = pud_offset(pgd, start);
    	pgd_clear(pgd);
    
    	pud_free_tlb(tlb, pud, start);
    
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
    
    void free_pgd_range(struct mmu_gather *tlb,
    
    			unsigned long addr, unsigned long end,
    			unsigned long floor, unsigned long ceiling)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	unsigned long next;
    
    	unsigned long start;
    
    	/*
    	 * The next few lines have given us lots of grief...
    	 *
    	 * Why are we testing PMD* at this top level?  Because often
    	 * there will be no work to do at all, and we'd prefer not to
    	 * go all the way down to the bottom just to discover that.
    	 *
    	 * Why all these "- 1"s?  Because 0 represents both the bottom
    	 * of the address space and the top of it (using -1 for the
    	 * top wouldn't help much: the masks would do the wrong thing).
    	 * The rule is that addr 0 and floor 0 refer to the bottom of
    	 * the address space, but end 0 and ceiling 0 refer to the top
    	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
    	 * that end 0 case should be mythical).
    	 *
    	 * Wherever addr is brought up or ceiling brought down, we must
    	 * be careful to reject "the opposite 0" before it confuses the
    	 * subsequent tests.  But what about where end is brought down
    	 * by PMD_SIZE below? no, end can't go down to 0 there.
    	 *
    	 * Whereas we round start (addr) and ceiling down, by different
    	 * masks at different levels, in order to test whether a table
    	 * now has no other vmas using it, so can be freed, we don't
    	 * bother to round floor or end up - the tests don't need that.
    	 */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	addr &= PMD_MASK;
    	if (addr < floor) {
    		addr += PMD_SIZE;
    		if (!addr)
    			return;
    	}
    	if (ceiling) {
    		ceiling &= PMD_MASK;
    		if (!ceiling)
    			return;
    	}
    	if (end - 1 > ceiling - 1)
    		end -= PMD_SIZE;
    	if (addr > end - 1)
    		return;
    
    	start = addr;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	do {
    		next = pgd_addr_end(addr, end);
    		if (pgd_none_or_clear_bad(pgd))
    			continue;
    
    		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pgd++, addr = next, addr != end);
    
    void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
    
    		unsigned long floor, unsigned long ceiling)
    
    {
    	while (vma) {
    		struct vm_area_struct *next = vma->vm_next;
    		unsigned long addr = vma->vm_start;
    
    
    		/*
    		 * Hide vma from rmap and vmtruncate before freeing pgtables
    		 */
    		anon_vma_unlink(vma);
    		unlink_file_vma(vma);
    
    
    		if (is_vm_hugetlb_page(vma)) {
    
    			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
    
    				floor, next? next->vm_start: ceiling);
    
    		} else {
    			/*
    			 * Optimization: gather nearby vmas into one call down
    			 */
    			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
    
    			       && !is_vm_hugetlb_page(next)) {
    
    				vma = next;
    				next = vma->vm_next;
    
    				anon_vma_unlink(vma);
    				unlink_file_vma(vma);
    
    			}
    			free_pgd_range(tlb, addr, vma->vm_end,
    				floor, next? next->vm_start: ceiling);
    		}
    
    int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	pgtable_t new = pte_alloc_one(mm, address);
    
    	if (!new)
    		return -ENOMEM;
    
    
    	/*
    	 * Ensure all pte setup (eg. pte page lock and page clearing) are
    	 * visible before the pte is made visible to other CPUs by being
    	 * put into page tables.
    	 *
    	 * The other side of the story is the pointer chasing in the page
    	 * table walking code (when walking the page table without locking;
    	 * ie. most of the time). Fortunately, these data accesses consist
    	 * of a chain of data-dependent loads, meaning most CPUs (alpha
    	 * being the notable exception) will already guarantee loads are
    	 * seen in-order. See the alpha page table accessors for the
    	 * smp_read_barrier_depends() barriers in page table walking code.
    	 */
    	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
    
    
    	spin_lock(&mm->page_table_lock);
    
    	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		mm->nr_ptes++;
    		pmd_populate(mm, pmd, new);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	spin_unlock(&mm->page_table_lock);
    
    	if (new)
    		pte_free(mm, new);
    
    int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
    	if (!new)
    		return -ENOMEM;
    
    
    	smp_wmb(); /* See comment in __pte_alloc */
    
    
    	spin_lock(&init_mm.page_table_lock);
    
    	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
    
    		pmd_populate_kernel(&init_mm, pmd, new);
    
    	spin_unlock(&init_mm.page_table_lock);
    
    	if (new)
    		pte_free_kernel(&init_mm, new);
    
    static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
    {
    	if (file_rss)
    		add_mm_counter(mm, file_rss, file_rss);
    	if (anon_rss)
    		add_mm_counter(mm, anon_rss, anon_rss);
    }
    

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
    
    static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
    			  pte_t pte, struct page *page)
    
    	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
    	pud_t *pud = pud_offset(pgd, addr);
    	pmd_t *pmd = pmd_offset(pud, addr);
    	struct address_space *mapping;
    	pgoff_t index;
    
    	static unsigned long resume;
    	static unsigned long nr_shown;
    	static unsigned long nr_unshown;
    
    	/*
    	 * Allow a burst of 60 reports, then keep quiet for that minute;
    	 * or allow a steady drip of one report per second.
    	 */
    	if (nr_shown == 60) {
    		if (time_before(jiffies, resume)) {
    			nr_unshown++;
    			return;
    		}
    		if (nr_unshown) {
    
    			printk(KERN_ALERT
    				"BUG: Bad page map: %lu messages suppressed\n",
    
    				nr_unshown);
    			nr_unshown = 0;
    		}
    		nr_shown = 0;
    	}
    	if (nr_shown++ == 0)
    		resume = jiffies + 60 * HZ;
    
    
    	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
    	index = linear_page_index(vma, addr);
    
    
    	printk(KERN_ALERT
    		"BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
    
    		current->comm,
    		(long long)pte_val(pte), (long long)pmd_val(*pmd));
    	if (page) {
    
    		printk(KERN_ALERT
    
    		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
    		page, (void *)page->flags, page_count(page),
    		page_mapcount(page), page->mapping, page->index);
    	}
    
    	printk(KERN_ALERT
    
    		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
    		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
    	/*
    	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
    	 */
    	if (vma->vm_ops)
    
    		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
    
    				(unsigned long)vma->vm_ops->fault);
    	if (vma->vm_file && vma->vm_file->f_op)
    
    		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
    
    				(unsigned long)vma->vm_file->f_op->mmap);
    
    	dump_stack();
    
    	add_taint(TAINT_BAD_PAGE);
    
    static inline int is_cow_mapping(unsigned int flags)
    {
    	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }
    
    
#ifndef is_zero_pfn
/* Default test: a pfn is the zero page iff it matches the cached zero_pfn. */
static inline int is_zero_pfn(unsigned long pfn)
{
	return pfn == zero_pfn;
}
#endif
    
#ifndef my_zero_pfn
/* Default: one global zero page for every address (arches may override). */
static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return zero_pfn;
}
#endif
    
    
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
    
    #ifdef __HAVE_ARCH_PTE_SPECIAL
    # define HAVE_PTE_SPECIAL 1
    #else
    # define HAVE_PTE_SPECIAL 0
    #endif
    struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
    				pte_t pte)
    
    	unsigned long pfn = pte_pfn(pte);
    
    
    	if (HAVE_PTE_SPECIAL) {
    
    		if (likely(!pte_special(pte)))
    			goto check_pfn;
    
    Hugh Dickins's avatar
    Hugh Dickins committed
    		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
    			return NULL;
    
    		if (!is_zero_pfn(pfn))
    
    			print_bad_pte(vma, addr, pte, NULL);
    
    		return NULL;
    	}
    
    	/* !HAVE_PTE_SPECIAL case follows: */
    
    
    Jared Hulbert's avatar
    Jared Hulbert committed
    	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
    		if (vma->vm_flags & VM_MIXEDMAP) {
    			if (!pfn_valid(pfn))
    				return NULL;
    			goto out;
    		} else {
    
    			unsigned long off;
    			off = (addr - vma->vm_start) >> PAGE_SHIFT;
    
    Jared Hulbert's avatar
    Jared Hulbert committed
    			if (pfn == vma->vm_pgoff + off)
    				return NULL;
    			if (!is_cow_mapping(vma->vm_flags))
    				return NULL;
    		}
    
    	if (is_zero_pfn(pfn))
    		return NULL;
    
    check_pfn:
    	if (unlikely(pfn > highest_memmap_pfn)) {
    		print_bad_pte(vma, addr, pte, NULL);
    		return NULL;
    	}
    
    	 * NOTE! We still have PageReserved() pages in the page tables.
    	 * eg. VDSO mappings can cause them to exist.
    
    Jared Hulbert's avatar
    Jared Hulbert committed
    out:
    
    	return pfn_to_page(pfn);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     * copy one vm_area from one task to the other. Assumes the page tables
     * already present in the new task to be cleared in the whole range
     * covered by this vma.
     */
    
    
    static inline void
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    
    		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
    
    		unsigned long addr, int *rss)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	unsigned long vm_flags = vma->vm_flags;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_t pte = *src_pte;
    	struct page *page;
    
    	/* pte contains position in swap or file, so copy. */
    	if (unlikely(!pte_present(pte))) {
    		if (!pte_file(pte)) {
    
    			swp_entry_t entry = pte_to_swp_entry(pte);
    
    			swap_duplicate(entry);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			/* make sure dst_mm is on swapoff's mmlist. */
    			if (unlikely(list_empty(&dst_mm->mmlist))) {
    				spin_lock(&mmlist_lock);
    
    				if (list_empty(&dst_mm->mmlist))
    					list_add(&dst_mm->mmlist,
    						 &src_mm->mmlist);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				spin_unlock(&mmlist_lock);
    			}
    
    			if (is_write_migration_entry(entry) &&
    					is_cow_mapping(vm_flags)) {
    				/*
    				 * COW mappings require pages in both parent
    				 * and child to be set to read.
    				 */
    				make_migration_entry_read(&entry);
    				pte = swp_entry_to_pte(entry);
    				set_pte_at(src_mm, addr, src_pte, pte);
    			}
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		}
    
    		goto out_set_pte;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	/*
    	 * If it's a COW mapping, write protect it both
    	 * in the parent and the child
    	 */
    
    	if (is_cow_mapping(vm_flags)) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		ptep_set_wrprotect(src_mm, addr, src_pte);
    
    		pte = pte_wrprotect(pte);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	/*
    	 * If it's a shared mapping, mark it clean in
    	 * the child
    	 */
    	if (vm_flags & VM_SHARED)
    		pte = pte_mkclean(pte);
    	pte = pte_mkold(pte);
    
    
    	page = vm_normal_page(vma, addr, pte);
    	if (page) {
    		get_page(page);
    
    		page_dup_rmap(page);
    
    		rss[PageAnon(page)]++;
    
    
    out_set_pte:
    	set_pte_at(dst_mm, addr, dst_pte, pte);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pte_t *src_pte, *dst_pte;
    
    	spinlock_t *src_ptl, *dst_ptl;
    
    	int progress = 0;
    
    	int rss[2];
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    again:
    
    	rss[1] = rss[0] = 0;
    
    	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (!dst_pte)
    		return -ENOMEM;
    	src_pte = pte_offset_map_nested(src_pmd, addr);
    
    	src_ptl = pte_lockptr(src_mm, src_pmd);
    
    	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
    
    	arch_enter_lazy_mmu_mode();
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	do {
    		/*
    		 * We are holding two locks at this point - either of them
    		 * could generate latencies in another task on another CPU.
    		 */
    
    		if (progress >= 32) {
    			progress = 0;
    			if (need_resched() ||
    
    			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		if (pte_none(*src_pte)) {
    			progress++;
    			continue;
    		}
    
    		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		progress += 8;
    	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
    
    
    	arch_leave_lazy_mmu_mode();
    
    	spin_unlock(src_ptl);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_unmap_nested(src_pte - 1);
    
    	add_mm_rss(dst_mm, rss[0], rss[1]);
    
    	pte_unmap_unlock(dst_pte - 1, dst_ptl);
    	cond_resched();
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (addr != end)
    		goto again;
    	return 0;
    }
    
    static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pmd_t *src_pmd, *dst_pmd;
    	unsigned long next;
    
    	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
    	if (!dst_pmd)
    		return -ENOMEM;
    	src_pmd = pmd_offset(src_pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    		if (pmd_none_or_clear_bad(src_pmd))
    			continue;
    		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
    						vma, addr, next))
    			return -ENOMEM;
    	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
    	return 0;
    }
    
    static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pud_t *src_pud, *dst_pud;
    	unsigned long next;
    
    	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
    	if (!dst_pud)
    		return -ENOMEM;
    	src_pud = pud_offset(src_pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    		if (pud_none_or_clear_bad(src_pud))
    			continue;
    		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
    						vma, addr, next))
    			return -ENOMEM;
    	} while (dst_pud++, src_pud++, addr = next, addr != end);
    	return 0;
    }
    
    int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		struct vm_area_struct *vma)
    {
    	pgd_t *src_pgd, *dst_pgd;
    	unsigned long next;
    	unsigned long addr = vma->vm_start;
    	unsigned long end = vma->vm_end;
    
    Andrea Arcangeli's avatar
    Andrea Arcangeli committed
    	int ret;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	/*
    	 * Don't copy ptes where a page fault will fill them correctly.
    	 * Fork becomes much lighter when there are big shared or private
    	 * readonly mappings. The tradeoff is that copy_page_range is more
    	 * efficient than faulting.
    	 */
    
    	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
    
    		if (!vma->anon_vma)
    			return 0;
    	}
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (is_vm_hugetlb_page(vma))
    		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
    
    
    	if (unlikely(is_pfn_mapping(vma))) {
    
    		/*
    		 * We do not free on error cases below as remove_vma
    		 * gets called on error from higher level routine
    		 */
    		ret = track_pfn_vma_copy(vma);
    		if (ret)
    			return ret;
    	}
    
    
    Andrea Arcangeli's avatar
    Andrea Arcangeli committed
    	/*
    	 * We need to invalidate the secondary MMU mappings only when
    	 * there could be a permission downgrade on the ptes of the
    	 * parent mm. And a permission downgrade will only happen if
    	 * is_cow_mapping() returns true.
    	 */
    	if (is_cow_mapping(vma->vm_flags))
    		mmu_notifier_invalidate_range_start(src_mm, addr, end);
    
    	ret = 0;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	dst_pgd = pgd_offset(dst_mm, addr);
    	src_pgd = pgd_offset(src_mm, addr);
    	do {
    		next = pgd_addr_end(addr, end);
    		if (pgd_none_or_clear_bad(src_pgd))
    			continue;
    
    Andrea Arcangeli's avatar
    Andrea Arcangeli committed
    		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
    					    vma, addr, next))) {
    			ret = -ENOMEM;
    			break;
    		}
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
    
    Andrea Arcangeli's avatar
    Andrea Arcangeli committed
    
    	if (is_cow_mapping(vma->vm_flags))
    		mmu_notifier_invalidate_range_end(src_mm,
    						  vma->vm_start, end);
    	return ret;
    
    static unsigned long zap_pte_range(struct mmu_gather *tlb,
    
    				struct vm_area_struct *vma, pmd_t *pmd,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct mm_struct *mm = tlb->mm;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_t *pte;
    
    	spinlock_t *ptl;
    
    	int file_rss = 0;
    	int anon_rss = 0;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    
    	arch_enter_lazy_mmu_mode();
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	do {
    		pte_t ptent = *pte;
    
    		if (pte_none(ptent)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    
    		(*zap_work) -= PAGE_SIZE;
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		if (pte_present(ptent)) {
    
    			struct page *page;
    
    			page = vm_normal_page(vma, addr, ptent);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			if (unlikely(details) && page) {
    				/*
    				 * unmap_shared_mapping_pages() wants to
    				 * invalidate cache without truncating:
    				 * unmap shared but keep private pages.
    				 */
    				if (details->check_mapping &&
    				    details->check_mapping != page->mapping)
    					continue;
    				/*
    				 * Each page->index must be checked when
    				 * invalidating or truncating nonlinear.
    				 */
    				if (details->nonlinear_vma &&
    				    (page->index < details->first_index ||
    				     page->index > details->last_index))
    					continue;
    			}
    
    			ptent = ptep_get_and_clear_full(mm, addr, pte,
    
    							tlb->fullmm);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_remove_tlb_entry(tlb, pte, addr);
    			if (unlikely(!page))
    				continue;
    			if (unlikely(details) && details->nonlinear_vma
    			    && linear_page_index(details->nonlinear_vma,
    						addr) != page->index)
    
    				set_pte_at(mm, addr, pte,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    					   pgoff_to_pte(page->index));
    			if (PageAnon(page))
    
    				anon_rss--;
    
    			else {
    				if (pte_dirty(ptent))
    					set_page_dirty(page);
    
    				if (pte_young(ptent) &&
    				    likely(!VM_SequentialReadHint(vma)))
    
    					mark_page_accessed(page);
    
    				file_rss--;
    
    			page_remove_rmap(page);
    
    			if (unlikely(page_mapcount(page) < 0))
    				print_bad_pte(vma, addr, ptent, page);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_remove_page(tlb, page);
    			continue;
    		}
    		/*
    		 * If details->check_mapping, we leave swap entries;
    		 * if details->nonlinear_vma, we leave file entries.
    		 */
    		if (unlikely(details))
    			continue;
    
    		if (pte_file(ptent)) {
    			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
    				print_bad_pte(vma, addr, ptent, NULL);
    		} else if
    		  (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
    			print_bad_pte(vma, addr, ptent, NULL);
    
    		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
    
    	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
    
    	add_mm_rss(mm, file_rss, anon_rss);
    
    	arch_leave_lazy_mmu_mode();
    
    	pte_unmap_unlock(pte - 1, ptl);
    
    static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
    
    				struct vm_area_struct *vma, pud_t *pud,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pmd_t *pmd;
    	unsigned long next;
    
    	pmd = pmd_offset(pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    
    		if (pmd_none_or_clear_bad(pmd)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    		}
    		next = zap_pte_range(tlb, vma, pmd, addr, next,
    						zap_work, details);
    	} while (pmd++, addr = next, (addr != end && *zap_work > 0));
    
    	return addr;
    
    static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
    
    				struct vm_area_struct *vma, pgd_t *pgd,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pud_t *pud;
    	unsigned long next;
    
    	pud = pud_offset(pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    
    		if (pud_none_or_clear_bad(pud)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    		}
    		next = zap_pmd_range(tlb, vma, pud, addr, next,
    						zap_work, details);
    	} while (pud++, addr = next, (addr != end && *zap_work > 0));
    
    	return addr;
    
    static unsigned long unmap_page_range(struct mmu_gather *tlb,
    				struct vm_area_struct *vma,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	unsigned long next;
    
    	if (details && !details->check_mapping && !details->nonlinear_vma)
    		details = NULL;
    
    	BUG_ON(addr >= end);
    	tlb_start_vma(tlb, vma);
    	pgd = pgd_offset(vma->vm_mm, addr);
    	do {
    		next = pgd_addr_end(addr, end);
    
    		if (pgd_none_or_clear_bad(pgd)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    		}
    		next = zap_pud_range(tlb, vma, pgd, addr, next,
    						zap_work, details);
    	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	tlb_end_vma(tlb, vma);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    #ifdef CONFIG_PREEMPT
    # define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
    #else
    /* No preempt: go for improved straight-line efficiency */
    # define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
    #endif
    
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
    
    unsigned long unmap_vmas(struct mmu_gather **tlbp,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		struct vm_area_struct *vma, unsigned long start_addr,
    		unsigned long end_addr, unsigned long *nr_accounted,
    		struct zap_details *details)
    {
    
    	long zap_work = ZAP_BLOCK_SIZE;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
    	int tlb_start_valid = 0;
    
    	unsigned long start = start_addr;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;